Example #1
def ca(source, destination, user=None, group=None):
    """
    Copy the Certificate Authority (CA) to the destination, creating parent
    directories if needed and assigning the owner if set. The tls layer
    installs the CA on all the peers in /usr/local/share/ca-certificates/.

    :param string source: The path to look for the certificate; if None the
    CA will be copied from the default location.
    :param string destination: The path to save the CA certificate.
    :param string user: The optional user name to own the CA certificate.
    :param string group: The optional group name to own the CA certificate.
    """
    _ensure_directory(destination, user, group)

    if not source:
        # When no source is specified, use the default CA path.
        source = \
            '/usr/local/share/ca-certificates/{0}.crt'.format(service_name())

    # Copy the ca certificate to the destination directory.
    copy2(source, destination)
    chown(destination, user, group)

    # Record the destination path of the CA certificate in unitdata.
    unitdata.kv().set('ca-cert-path', destination)
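
A minimal usage sketch (the destination path and owner below are illustrative assumptions, not part of the layer): copy the default CA into an application's config directory once the certificates are in place.

# Hypothetical caller: copy the default CA for this application into an
# assumed destination, owned by an assumed user/group.
ca(source=None,
   destination='/etc/myapp/ca.crt',  # assumed path
   user='myapp', group='myapp')      # assumed owner/group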
def ceph_changed(rid=None, unit=None):
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    if not ensure_ceph_keyring(service=service_name(), user='******',
                               group='nova'):
        log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(ceph_config_file())
    CONFIGS.write(CEPH_SECRET)
    CONFIGS.write(NOVA_CONF)

    # With some refactoring, this can move into NovaComputeCephContext
    # and be easily extended to support other compute flavors.
    key = relation_get(attribute='key', rid=rid, unit=unit)
    if config('virt-type') in ['kvm', 'qemu', 'lxc'] and key:
        create_libvirt_secret(secret_file=CEPH_SECRET,
                              secret_uuid=CEPH_SECRET_UUID, key=key)

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        # Ensure that nova-compute is restarted since only now can we
        # guarantee that ceph resources are ready, but only if not paused.
        if (not is_unit_paused_set() and
                not is_broker_action_done('nova_compute_restart', rid,
                                          unit)):
            service_restart('nova-compute')
            mark_broker_action_done('nova_compute_restart', rid, unit)
    else:
        send_request_if_needed(get_ceph_request())
    def __call__(self):
        """
        Used to generate template context to be added to cinder.conf in the
        presence of a ceph relation.
        """
        if not is_relation_made('ceph', 'key'):
            return {}
        service = service_name()
        os_codename = get_os_codename_package('cinder-common')
        if CompareOpenStackReleases(os_codename) >= "icehouse":
            volume_driver = 'cinder.volume.drivers.rbd.RBDDriver'
        else:
            volume_driver = 'cinder.volume.driver.RBDDriver'

        if config('rbd-pool-name'):
            pool_name = config('rbd-pool-name')
        else:
            pool_name = service
        section = {service: [('volume_backend_name', service),
                             ('volume_driver', volume_driver),
                             ('rbd_pool', pool_name),
                             ('rbd_user', service),
                             ('rbd_secret_uuid', leader_get('secret-uuid')),
                             ('rbd_ceph_conf', ceph_config_file())]}

        if CompareOpenStackReleases(os_codename) >= "mitaka":
            section[service].append(('report_discard_supported', True))

        if CompareOpenStackReleases(os_codename) >= "queens":
            section[service].append(('rbd_exclusive_cinder_pool', True))
            section[service].append(
                ('rbd_flatten_volume_from_snapshot',
                 config('rbd-flatten-volume-from-snapshot')))

        return {'cinder': {'/etc/cinder/cinder.conf': {'sections': section}}}
def emit_cephconf():

    cephcontext = {
        'mon_hosts': config('monitor-hosts'),
        'fsid': config('fsid'),
        'use_syslog': str(config('use-syslog')).lower(),
        'loglevel': config('loglevel'),
    }

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
    keyring = 'ceph.client.admin.keyring'
    keyring_path = '/etc/ceph/' + keyring
    render(keyring, keyring_path, {'admin_key': config('admin-key')},
           owner=ceph.ceph_user(), perms=0o600)

    keyring = 'keyring'
    keyring_path = ('/var/lib/ceph/mon/ceph-' + get_unit_hostname() +
                    '/' + keyring)
    render('mon.keyring', keyring_path, {'admin_key': config('admin-key')},
           owner=ceph.ceph_user(), perms=0o600)

    notify_radosgws()
    notify_client()
Example #5
def emit_cephconf():
    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    if not ensure_ceph_keyring(service=service_name(), user='******',
                               group='nova'):
        log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(ceph_config_file())
    CONFIGS.write(CEPH_SECRET)
    CONFIGS.write(NOVA_CONF)

    # With some refactoring, this can move into NovaComputeCephContext
    # and be easily extended to support other compute flavors.
    if config('virt-type') in ['kvm', 'qemu', 'lxc'] and relation_get('key'):
        create_libvirt_secret(secret_file=CEPH_SECRET,
                              secret_uuid=CEPH_SECRET_UUID,
                              key=relation_get('key'))

    if (config('libvirt-image-backend') == 'rbd' and
            assert_libvirt_imagebackend_allowed()):
        if is_request_complete(get_ceph_request()):
            log('Request complete')
            # Ensure that nova-compute is restarted since only now can we
            # guarantee that ceph resources are ready.
            service_restart('nova-compute')
        else:
            send_request_if_needed(get_ceph_request())
Example #7
def emit_cephconf():
    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': config('ceph-public-network'),
        'ceph_cluster_network': config('ceph-cluster-network'),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not config('ceph-public-network'):
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not config('ceph-cluster-network'):
            cephcontext['cluster_addr'] = dynamic_ipv6_address

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf))
    with open(charm_ceph_conf, 'w') as cephconf:
        cephconf.write(render_template('ceph.conf', cephcontext))
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
Example #8
def send_client_all_info(client):
    """Send clients (plugin, RM, non-DNs) all dfs relation data.

    At this point, the namenode is ready to serve clients. Send all
    dfs relation data so that our 'namenode.ready' state becomes set.
    """
    bigtop = Bigtop()
    fqdn = get_fqdn()
    hdfs_port = get_layer_opts().port('namenode')
    webhdfs_port = get_layer_opts().port('nn_webapp_http')

    client.send_spec(bigtop.spec())
    client.send_namenodes([fqdn])
    client.send_ports(hdfs_port, webhdfs_port)
    # namenode.ready implies we have at least 1 datanode, which means hdfs
    # is ready for use. Inform clients of that with send_ready().
    if is_state('apache-bigtop-namenode.ready'):
        client.send_ready(True)
    else:
        client.send_ready(False)

    # hosts_map and clustername are required by the dfs interface to signify
    # NN's readiness. Send them, even though they are not utilized by bigtop.
    client.send_hosts_map(utils.get_kv_hosts())
    client.send_clustername(hookenv.service_name())
Example #9
def send_dn_all_info(datanode):
    """Send datanodes all dfs-slave relation data.

    At this point, the namenode is ready to serve datanodes. Send all
    dfs-slave relation data so that our 'namenode.ready' state becomes set.
    """
    bigtop = Bigtop()
    fqdn = get_fqdn()
    hdfs_port = get_layer_opts().port('namenode')
    webhdfs_port = get_layer_opts().port('nn_webapp_http')

    datanode.send_spec(bigtop.spec())
    datanode.send_namenodes([fqdn])
    datanode.send_ports(hdfs_port, webhdfs_port)

    # hosts_map, ssh_key, and clustername are required by the dfs-slave
    # interface to signify NN's readiness. Send them, even though they are not
    # utilized by bigtop.
    # NB: update KV hosts with all datanodes prior to sending the hosts_map
    # because dfs-slave gates readiness on a DN's presence in the hosts_map.
    utils.update_kv_hosts(datanode.hosts_map())
    datanode.send_hosts_map(utils.get_kv_hosts())
    datanode.send_ssh_key('invalid')
    datanode.send_clustername(hookenv.service_name())

    # update status with slave count and report ready for hdfs
    num_slaves = len(datanode.nodes())
    hookenv.status_set('active', 'ready ({count} datanode{s})'.format(
        count=num_slaves,
        s='s' if num_slaves > 1 else '',
    ))
    set_state('apache-bigtop-namenode.ready')
    def __call__(self):
        ctxt = super(NovaComputeCephContext, self).__call__()
        if not ctxt:
            return {}
        svc = service_name()
        # secret.xml
        ctxt['ceph_secret_uuid'] = CEPH_SECRET_UUID
        # nova.conf
        ctxt['service_name'] = svc
        ctxt['rbd_user'] = svc
        ctxt['rbd_secret_uuid'] = CEPH_SECRET_UUID
        ctxt['rbd_pool'] = config('rbd-pool')

        if (config('libvirt-image-backend') == 'rbd' and
                assert_libvirt_rbd_imagebackend_allowed()):
            ctxt['libvirt_rbd_images_ceph_conf'] = ceph_config_file()

        rbd_cache = config('rbd-client-cache') or ""
        if rbd_cache.lower() == "enabled":
            # We use write-through only to be safe for migration
            ctxt['rbd_client_cache_settings'] = \
                {'rbd cache': 'true',
                 'rbd cache size': '64 MiB',
                 'rbd cache max dirty': '0 MiB',
                 'rbd cache writethrough until flush': 'true',
                 'admin socket': '/var/run/ceph/rbd-client-$pid.asok'}

            asok_path = '/var/run/ceph/'
            if not os.path.isdir(asok_path):
                os.mkdir(asok_path)

        elif rbd_cache.lower() == "disabled":
            ctxt['rbd_client_cache_settings'] = {'rbd cache': 'false'}

        return ctxt
Example #11
def apply_node_labels():
    ''' Parse the labels configuration option and apply the labels to the node.
    '''
    # scrub and try to format an array from the configuration option
    config = hookenv.config()
    user_labels = _parse_labels(config.get('labels'))

    # For diffing's sake, iterate over the previous label set
    if config.previous('labels'):
        previous_labels = _parse_labels(config.previous('labels'))
        hookenv.log('previous labels: {}'.format(previous_labels))
    else:
        # this handles the first run, when there is no previous labels config
        previous_labels = _parse_labels("")

    # Calculate label removal
    for label in previous_labels:
        if label not in user_labels:
            hookenv.log('Deleting node label {}'.format(label))
            _apply_node_label(label, delete=True)
        # if the label is in user labels we do nothing here; it will get set
        # during the atomic update below.

    # Atomically set a label
    for label in user_labels:
        _apply_node_label(label, overwrite=True)

    # Set label for application name
    _apply_node_label('juju-application={}'.format(hookenv.service_name()),
                      overwrite=True)
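
For reference, a minimal sketch of what the `_parse_labels` helper referenced above could look like, assuming labels are supplied as a space-separated list of key=value pairs; this is an illustrative assumption, not the charm's actual implementation.

def _parse_labels(labels):
    # Assumed format: 'key1=value1 key2=value2 ...'; None or '' yields [].
    if not labels:
        return []
    return [label for label in labels.split() if '=' in label]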
    def __call__(self):
        """Used to generate template context to be added to cinder.conf in the
        presence of a ceph relation.
        """
        # TODO(this should call is_relation_made)
        if not relation_ids('ceph'):
            return {}
        service = service_name()
        cmp_os_release = CompareOpenStackReleases(os_release('cinder-common'))
        if cmp_os_release >= "icehouse":
            volume_driver = 'cinder.volume.drivers.rbd.RBDDriver'
        else:
            volume_driver = 'cinder.volume.driver.RBDDriver'
        if cmp_os_release >= "ocata":
            driver_key = 'ceph_volume_driver'
        else:
            driver_key = 'volume_driver'
        return {
            driver_key: volume_driver,
            # ensure_ceph_pool() creates pool based on service name.
            'rbd_pool': service,
            'rbd_user': service,
            'host': service,
            'rbd_ceph_conf': ceph_config_file()
        }
Example #13
    def register_journalnodes(self, nodes, port):
        clustername = hookenv.service_name()
        hdfs_site = self.hadoop_base.dist_config.path(
            'hadoop_conf') / 'hdfs-site.xml'
        with utils.xmlpropmap_edit_in_place(hdfs_site) as props:
            props['dfs.namenode.shared.edits.dir'] = 'qjournal://{}/{}'.format(
                ';'.join(['%s:%s' % (host, port) for host in nodes]),
                clustername)
def get_ceph_request():
    service = service_name()
    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')
    weight = config('ceph-pool-weight')
    rq.add_op_create_pool(name=service, replica_count=replicas,
                          weight=weight)
    return rq
Example #15
def configure_git(git):
    status_set('maintenance', 'Configuring ssh and git')
    username = service_name()
    if not os.path.exists(SSH_IDENTITY):
        subprocess.check_call(['ssh-keygen', '-P', '', '-f', SSH_IDENTITY])
    public_key = open(SSH_IDENTITY + '.pub').read()
    git.configure(username, public_key)
    set_state('git.configured')
def emit_cephconf():
    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    render('ceph.conf', charm_ceph_conf, get_ceph_context(), perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
Example #17
    def add_op_request_access_to_group(self, name, namespace=None,
                                       permission=None, key_name=None):
        """
        Adds the requested permissions to the current service's Ceph key,
        allowing the key to access only the specified pools
        """
        self.ops.append({
            'op': 'add-permissions-to-key',
            'group': name,
            'namespace': namespace,
            'name': key_name or service_name(),
            'group-permission': permission,
        })
Example #18
def get_cassandra_rackdc():
    c = config()
    datacenter = c['datacenter'].strip()
    rack = c['rack'].strip() or hookenv.service_name()
    rackdc_properties = dedent('''\
                               dc={}
                               rack={}
                               ''').format(datacenter, rack)
    return rackdc_properties
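
For example, with `datacenter: dc1` and an empty `rack` option in an application deployed as `cassandra` (values assumed for illustration), the returned properties are:

dc=dc1
rack=cassandra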
def send_info(datanode):
    hadoop = get_hadoop_base()
    hdfs_port = hadoop.dist_config.port('namenode')
    webhdfs_port = hadoop.dist_config.port('nn_webapp_http')

    datanode.send_spec(hadoop.spec())
    datanode.send_clustername(hookenv.service_name())
    datanode.send_namenodes(get_cluster_nodes())
    datanode.send_ports(hdfs_port, webhdfs_port)
Example #20
    def format_namenode(self):
        if unitdata.kv().get('hdfs.namenode.formatted'):
            return
        self.stop_namenode()
        # Run without prompting; this will fail if the namenode has already
        # been formatted -- we do not want to reformat existing data!
        clusterid = hookenv.service_name()
        self._hdfs('namenode', '-format', '-noninteractive',
                   '-clusterid', clusterid)
        unitdata.kv().set('hdfs.namenode.formatted', True)
        unitdata.kv().flush(True)
def storage_backend(rel_id=None):
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
    else:
        relation_set(
            relation_id=rel_id,
            backend_name=service_name(),
            subordinate_configuration=json.dumps(CephSubordinateContext()()),
            stateless=True,
        )
def configure_cassandra_rackdc():
    config = hookenv.config()
    datacenter = config['datacenter'].strip()
    rack = config['rack'].strip() or hookenv.service_name()
    rackdc_properties = dedent('''\
                               dc={}
                               rack={}
                               ''').format(datacenter, rack)
    rackdc_path = helpers.get_cassandra_rackdc_file()
    host.write_file(rackdc_path, rackdc_properties.encode('UTF-8'))
Example #23
    def configure_namenode(self, namenodes):
        dc = self.hadoop_base.dist_config
        clustername = hookenv.service_name()
        host = hookenv.local_unit().replace('/', '-')
        self.configure_hdfs_base(clustername, namenodes,
                                 dc.port('namenode'),
                                 dc.port('nn_webapp_http'))
        hdfs_site = dc.path('hadoop_conf') / 'hdfs-site.xml'
        with utils.xmlpropmap_edit_in_place(hdfs_site) as props:
            props['dfs.namenode.datanode.registration.ip-hostname-check'] = \
                'true'
            props['dfs.namenode.http-address.%s.%s' % (clustername, host)] = \
                '%s:%s' % (host, dc.port('nn_webapp_http'))
        self.hadoop_base.setup_init_script("hdfs", "namenode")
Example #24
def juju_state_to_yaml(yaml_path, namespace_separator=':',
                       allow_hyphens_in_keys=True, mode=None):
    """Update the juju config and state in a yaml file.

    This includes any current relation-get data, and the charm
    directory.

    This function was created for the ansible and saltstack
    support, as those libraries can use a yaml file to supply
    context to templates, but it may be useful generally to
    create and update an on-disk cache of all the config, including
    previous relation data.

    By default, hyphens are allowed in keys as this is supported
    by yaml, but for tools like ansible, hyphens are not valid [1].

    [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name
    """
    config = hookenv.config()

    config['charm_dir'] = os.environ.get('CHARM_DIR', '')
    config['local_unit'] = hookenv.local_unit()
    config['service_name'] = hookenv.service_name()
    config['unit_private_address'] = hookenv.unit_private_ip()
    config['unit_public_address'] = hookenv.unit_get('public-address')

    # Don't use non-standard tags for unicode, which will not
    # work when salt uses yaml.safe_load.
    yaml.add_representer(six.text_type,
                         lambda dumper, value: dumper.represent_scalar(
                             six.u('tag:yaml.org,2002:str'), value))

    yaml_dir = os.path.dirname(yaml_path)
    if not os.path.exists(yaml_dir):
        os.makedirs(yaml_dir)

    if os.path.exists(yaml_path):
        with open(yaml_path, "r") as existing_vars_file:
            existing_vars = yaml.load(existing_vars_file.read())
    else:
        with open(yaml_path, "w+"):
            pass
        existing_vars = {}

    if mode is not None:
        os.chmod(yaml_path, mode)

    if not allow_hyphens_in_keys:
        config = dict_keys_without_hyphens(config)
    existing_vars.update(config)

    update_relations(existing_vars, namespace_separator)

    with open(yaml_path, "w+") as fp:
        fp.write(yaml.dump(existing_vars, default_flow_style=False))
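
A hedged usage sketch: a charm driving an ansible playbook might dump its context to a vars file first. The path and separator below are illustrative assumptions.

# Illustrative only: write config, relation data and unit details to a
# vars file that a playbook can consume, stripping hyphens from keys.
juju_state_to_yaml('/etc/ansible/host_vars/localhost',  # assumed path
                   namespace_separator='__',
                   allow_hyphens_in_keys=False,
                   mode=0o600)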
def accept_clients(clients):
    hadoop = get_hadoop_base()
    hdfs_port = hadoop.dist_config.port('namenode')
    webhdfs_port = hadoop.dist_config.port('nn_webapp_http')

    clients.send_spec(hadoop.spec())
    clients.send_clustername(hookenv.service_name())
    clients.send_namenodes(get_cluster_nodes())
    clients.send_ports(hdfs_port, webhdfs_port)
    clients.send_hosts_map(utils.get_kv_hosts())
    clients.send_ready(True)
Example #26
def emit_cephconf(upgrading=False):
    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    context = get_ceph_context(upgrading)
    write_file(charm_ceph_conf, render_template('ceph.conf', context),
               ceph.ceph_user(), ceph.ceph_user(), 0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 90)
Example #27
def install_ca(certificate_authority):
    """Install a certificiate authority on the system."""
    ca_file = "/usr/local/share/ca-certificates/{0}.crt".format(hookenv.service_name())
    hookenv.log("Writing CA to {0}".format(ca_file))
    # Write the contents of certificate authority to the file.
    with open(ca_file, "w") as fp:
        fp.write(certificate_authority)
    # Update the trusted CAs on this system.
    check_call(["update-ca-certificates"])
    # Notify other layers that the certificate authority is available.
    set_state("tls.certificate.authority available")
Example #28
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if the operator is
    upgrading a charm in a deployment that pre-dates the kube-control
    relation, it'll be missing.

    """
    hookenv.status_set(
        'blocked',
        'Relate {}:kube-control kubernetes-master:kube-control'.format(
            hookenv.service_name()))
def get_rabbit_password_on_disk(username, password=None, local=False):
    ''' Retrieve, generate or store a rabbit password for
    the provided username on disk'''
    if local:
        _passwd_file = _local_named_passwd.format(service_name(), username)
    else:
        _passwd_file = _named_passwd.format(service_name(), username)

    _password = None
    if os.path.exists(_passwd_file):
        with open(_passwd_file, 'r') as passwd:
            _password = passwd.read().strip()
    else:
        mkdir(os.path.dirname(_passwd_file), owner=RABBIT_USER,
              group=RABBIT_USER, perms=0o775)
        os.chmod(os.path.dirname(_passwd_file), 0o775)
        _password = password or pwgen(length=64)
        write_file(_passwd_file, _password, owner=RABBIT_USER,
                   group=RABBIT_USER, perms=0o660)

    return _password
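
A short usage sketch (the username is an assumption): the first call generates and persists a password; later calls return the same value from disk.

# Illustrative only: fetch (or lazily create) the password for an assumed
# 'glance' rabbit user.
password = get_rabbit_password_on_disk('glance')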
def migrate_passwords_to_peer_relation():
    '''Migrate any passwords stored on disk to the cluster peer relation'''
    for f in glob.glob('/var/lib/charm/{}/*.passwd'.format(service_name())):
        _key = os.path.basename(f)
        with open(f, 'r') as passwd:
            _value = passwd.read().strip()
        try:
            peer_store(_key, _value)
            os.unlink(f)
        except ValueError:
            # NOTE cluster relation not yet ready - skip for now
            pass
def render_and_launch_ingress():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    config = hookenv.config()

    # need to test this in case we get in
    # here from a config change to the image
    if not config.get('ingress'):
        return

    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'
    context['juju_application'] = hookenv.service_name()

    # If present, workers will get the ingress containers from the configured
    # registry. Otherwise, we'll set an appropriate upstream image registry.
    registry_location = get_registry_location()

    context['defaultbackend_image'] = config.get('default-backend-image')
    if (context['defaultbackend_image'] == "" or
       context['defaultbackend_image'] == "auto"):
        if registry_location:
            backend_registry = registry_location
        else:
            backend_registry = 'k8s.gcr.io'
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "{}/defaultbackend-s390x:1.4".format(backend_registry)
        elif context['arch'] == 'arm64':
            context['defaultbackend_image'] = \
                "{}/defaultbackend-arm64:1.5".format(backend_registry)
        else:
            context['defaultbackend_image'] = \
                "{}/defaultbackend-amd64:1.5".format(backend_registry)

    # Render the ingress daemon set controller manifest
    context['ssl_chain_completion'] = config.get(
        'ingress-ssl-chain-completion')
    context['enable_ssl_passthrough'] = config.get(
        'ingress-ssl-passthrough')
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        if registry_location:
            nginx_registry = registry_location
        else:
            nginx_registry = 'quay.io'
        images = {'amd64': 'kubernetes-ingress-controller/nginx-ingress-controller-amd64:0.25.1',  # noqa
                  'arm64': 'kubernetes-ingress-controller/nginx-ingress-controller-arm64:0.25.1',  # noqa
                  's390x': 'kubernetes-ingress-controller/nginx-ingress-controller-s390x:0.20.0',  # noqa
                  'ppc64el': 'kubernetes-ingress-controller/nginx-ingress-controller-ppc64le:0.20.0',  # noqa
                 }
        context['ingress_image'] = '{}/{}'.format(nginx_registry,
                                                  images.get(context['arch'],
                                                             images['amd64']))

    kubelet_version = get_version('kubelet')
    if kubelet_version < (1, 9):
        context['daemonset_api_version'] = 'extensions/v1beta1'
        context['deployment_api_version'] = 'extensions/v1beta1'
    elif kubelet_version < (1, 16):
        context['daemonset_api_version'] = 'apps/v1beta2'
        context['deployment_api_version'] = 'extensions/v1beta1'
    else:
        context['daemonset_api_version'] = 'apps/v1'
        context['deployment_api_version'] = 'apps/v1'
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the default http backend (404) deployment manifest
    # needs to happen after ingress-daemon-set since that sets up the namespace
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
Example #32
def start_charm():
    layer.status.maintenance("configuring container")

    image_info = layer.docker_resource.get_info("oci-image")

    namespace = os.environ["JUJU_MODEL_NAME"]
    port = hookenv.config("port")

    # Talk to the K8s API to read the auto-generated webhook certificate secret.
    # Borrow the env vars from the root process that let the Kubernetes
    # client automatically look up connection info, since `load_incluster_config`
    # for whatever reason doesn't support loading the serviceaccount token from disk.
    os.environ.update(
        dict(
            e.split("=")
            for e in Path("/proc/1/environ").read_text().split("\x00")
            if "KUBERNETES_SERVICE" in e))

    config.load_incluster_config()
    v1 = client.CoreV1Api()
    layer.status.maintenance(
        'Waiting for secrets/cert-manager-webhook-tls to be created')
    for _ in range(30):
        try:
            secret = v1.read_namespaced_secret(name="cert-manager-webhook-tls",
                                               namespace=namespace)
            break
        except client.rest.ApiException as err:
            hookenv.log(err)
            time.sleep(10)
    else:
        layer.status.blocked('cert-manager-webhook-tls certificate not found.')
        return False

    layer.caas_base.pod_spec_set(
        {
            "version":
            2,
            "serviceAccount": {
                "global":
                True,
                "rules": [
                    {
                        "apiGroups": ["admission.cert-manager.io"],
                        "resources": [
                            "certificates",
                            "certificaterequests",
                            "issuers",
                            "clusterissuers",
                        ],
                        "verbs": ["create"],
                    },
                    {
                        "apiGroups": [""],
                        "resources": ["configmaps"],
                        "verbs": ["get"]
                    },
                ],
            },
            "containers": [{
                "name":
                "cert-manager-webhook",
                "imageDetails": {
                    "imagePath": image_info.registry_path,
                    "username": image_info.username,
                    "password": image_info.password,
                },
                "args": [
                    "--v=2",
                    f"--secure-port={port}",
                    "--tls-cert-file=/certs/tls.crt",
                    "--tls-private-key-file=/certs/tls.key",
                ],
                "ports": [{
                    "name": "https",
                    "containerPort": port
                }],
                "config": {
                    "POD_NAMESPACE": namespace
                },
                "files": [{
                    "name": "certs",
                    "mountPath": "/certs",
                    "files": {
                        "tls.crt":
                        b64decode(secret.data['tls.crt']).decode('utf-8'),
                        "tls.key":
                        b64decode(secret.data['tls.key']).decode('utf-8'),
                    },
                }],
            }],
        },
        k8s_resources={
            "kubernetesResources": {
                "mutatingWebhookConfigurations": {
                    "cert-manager-webhook": [{
                        "name":
                        "webhook.cert-manager.io",
                        "rules": [{
                            "apiGroups": ["cert-manager.io"],
                            "apiVersions": ["v1alpha2"],
                            "operations": ["CREATE", "UPDATE"],
                            "resources": [
                                "certificates",
                                "issuers",
                                "clusterissuers",
                                "orders",
                                "challenges",
                                "certificaterequests",
                            ],
                        }],
                        "failurePolicy":
                        "Fail",
                        "clientConfig": {
                            "service": {
                                "name": hookenv.service_name(),
                                "namespace": namespace,
                                "path":
                                "/apis/webhook.cert-manager.io/v1beta1/mutations",
                                "port": port,
                            },
                            "caBundle": secret.data['tls.crt'],
                        },
                    }]
                },
                "validatingWebhookConfigurations": {
                    "cert-manager-webhook": [{
                        "name":
                        "webhook.certmanager.k8s.io",
                        "rules": [{
                            "apiGroups": ["cert-manager.io"],
                            "apiVersions": ["v1alpha2"],
                            "operations": ["CREATE", "UPDATE"],
                            "resources": [
                                "certificates",
                                "issuers",
                                "clusterissuers",
                                "certificaterequests",
                            ],
                        }],
                        "failurePolicy":
                        "Fail",
                        "sideEffects":
                        "None",
                        "clientConfig": {
                            "service": {
                                "name": hookenv.service_name(),
                                "namespace": namespace,
                                "path":
                                "/apis/webhook.cert-manager.io/v1beta1/validations",
                                "port": port,
                            },
                            "caBundle": secret.data['tls.crt'],
                        },
                    }]
                },
            }
        },
    )

    layer.status.maintenance("creating container")
    set_flag("charm.started")
def configure_mesh():
    endpoint_from_name('service-mesh').add_route(
        prefix='/jupyter', service=hookenv.service_name(), port=hookenv.config('port')
    )
def configure_domain_name(domain):
    domain.domain_name(hookenv.config('domain-name') or
                       hookenv.service_name())
    flags.set_flag('domain-name-configured')
Example #35
    def rabbit_client_cert_dir(self):
        return '/var/lib/charm/{}'.format(hookenv.service_name())
Example #36
    def test_gets_service_name(self, _unit):
        _unit.return_value = 'mysql/3'
        self.assertEqual(hookenv.service_name(), 'mysql')
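
For context, `hookenv.service_name()` conventionally derives the application name from the unit name, as the test above relies on; a simplified sketch of that behaviour (not charmhelpers' exact implementation) is:

def service_name():
    # local_unit() returns e.g. 'mysql/3'; the application name is 'mysql'.
    return local_unit().split('/')[0]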
Example #37
def start_charm():
    layer.status.maintenance('configuring container')

    config = hookenv.config()
    conf_dir = '/etc/config'
    conf_file = 'controller_config_file.yaml'
    conf_path = '/'.join([conf_dir, conf_file])
    image_info = layer.docker_resource.get_info('pytorch-operator-image')

    conf_data = {}
    if config['pytorch-default-image']:
        conf_data['pytorchImage'] = config['pytorch-default-image']

    layer.caas_base.pod_spec_set({
        'containers': [
            {
                'name':
                'pytorch-operator',
                'imageDetails': {
                    'imagePath': image_info.registry_path,
                    'username': image_info.username,
                    'password': image_info.password,
                },
                'command': [
                    '/pytorch-operator',
                    '--controller-config-file={}'.format(conf_path),
                    '--alsologtostderr',
                    '-v=1',
                ],
                'ports': [
                    {
                        'name': 'dummy',
                        'containerPort': 9999,
                    },
                ],
                'config': {
                    'MY_POD_NAMESPACE': os.environ['JUJU_MODEL_NAME'],
                    'MY_POD_NAME': hookenv.service_name(),
                },
                'files': [
                    {
                        'name': 'configs',
                        'mountPath': conf_dir,
                        'files': {
                            conf_file: yaml.dump(conf_data),
                        },
                    },
                ],
            },
        ],
        'customResourceDefinition': [
            {
                'group': 'kubeflow.org',
                'version': 'v1alpha1',
                'scope': 'Namespaced',
                'kind': 'PyTorchJob',
                'validation': {}
            },
        ],
    })

    layer.status.maintenance('creating container')
    set_flag('charm.kubeflow-pytorch-operator.started')
Example #38
def ca_file_path(arg):
    file_path = os.path.join(ch_host.CA_CERT_DIR,
                             "{}.crt".format(ch_hookenv.service_name()))
    if os.path.exists(file_path):
        return file_path
    return ''
def start_charm():
    layer.status.maintenance('configuring container')

    config = hookenv.config()
    image_info = layer.docker_resource.get_info('jupyterhub-image')
    application_name = hookenv.service_name()
    jh_config_src = Path('files/jupyterhub_config.py')
    jh_config_dst = Path('/etc/config/jupyterhub_config.py')

    layer.caas_base.pod_spec_set({
        'containers': [
            {
                'name':
                'tf-hub',
                'imageDetails': {
                    'imagePath': image_info.registry_path,
                    'username': image_info.username,
                    'password': image_info.password,
                },
                'command': [
                    'jupyterhub',
                    '-f',
                    str(jh_config_dst),
                ],
                'ports': [
                    {
                        'name': 'hub',
                        'containerPort': 8000,
                    },
                    {
                        'name': 'api',
                        'containerPort': 8081,
                    },
                ],
                'config': {
                    # we have to explicitly specify the k8s service name for
                    # use in the API URL because otherwise JupyterHub uses the
                    # pod name, which in our case doesn't just happen to match
                    # the service name; the k8s service name will always be the
                    # application name with a "juju-" prefix
                    'K8S_SERVICE_NAME': 'juju-{}'.format(application_name),
                    'AUTHENTICATOR': config['authenticator'],
                    'NOTEBOOK_STORAGE_SIZE': config['notebook-storage-size'],
                    'NOTEBOOK_STORAGE_CLASS': config['notebook-storage-class'],
                    'CLOUD_NAME': '',  # is there a way to detect this?
                    'REGISTRY': config['notebook-image-registry'],
                    'REPO_NAME': config['notebook-image-repo-name'],
                },
                'files': [
                    {
                        'name': 'configs',
                        'mountPath': str(jh_config_dst.parent),
                        'files': {
                            'jupyterhub_config.py': jh_config_src.read_text(),
                        },
                    },
                ],
            },
        ],
    })

    layer.status.maintenance('creating container')
    set_flag('charm.kubeflow-tf-hub.started')
Example #40
def get_ceph_request():
    service = service_name()
    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')
    rq.add_op_create_pool(name=service, replica_count=replicas)
    return rq
Example #41
def get_ceph_request():
    service = service_name()
    if config('rbd-pool-name'):
        pool_name = config('rbd-pool-name')
    else:
        pool_name = service

    rq = CephBrokerRq()
    weight = config('ceph-pool-weight')
    replicas = config('ceph-osd-replication-count')
    bluestore_compression = CephBlueStoreCompressionContext()

    if config('pool-type') == 'erasure-coded':
        # General EC plugin config
        plugin = config('ec-profile-plugin')
        technique = config('ec-profile-technique')
        device_class = config('ec-profile-device-class')
        metadata_pool_name = (config('ec-rbd-metadata-pool')
                              or "{}-metadata".format(service))
        bdm_k = config('ec-profile-k')
        bdm_m = config('ec-profile-m')
        # LRC plugin config
        bdm_l = config('ec-profile-locality')
        crush_locality = config('ec-profile-crush-locality')
        # SHEC plugin config
        bdm_c = config('ec-profile-durability-estimator')
        # CLAY plugin config
        bdm_d = config('ec-profile-helper-chunks')
        scalar_mds = config('ec-profile-scalar-mds')
        # Profile name
        profile_name = (config('ec-profile-name')
                        or "{}-profile".format(service))
        # Metadata sizing is approximately 1% of overall data weight
        # but is in effect driven by the number of rbd's rather than
        # their size - so it can be very lightweight.
        metadata_weight = weight * 0.01
        # Resize data pool weight to accommodate metadata weight
        weight = weight - metadata_weight
        # Create metadata pool
        rq.add_op_create_pool(name=metadata_pool_name,
                              replica_count=replicas,
                              weight=metadata_weight,
                              group='images',
                              app_name='rbd')

        # Create erasure profile
        rq.add_op_create_erasure_profile(name=profile_name,
                                         k=bdm_k,
                                         m=bdm_m,
                                         lrc_locality=bdm_l,
                                         lrc_crush_locality=crush_locality,
                                         shec_durability_estimator=bdm_c,
                                         clay_helper_chunks=bdm_d,
                                         clay_scalar_mds=scalar_mds,
                                         device_class=device_class,
                                         erasure_type=plugin,
                                         erasure_technique=technique)

        # Create EC data pool

        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
        # the unpacking of the BlueStore compression arguments as part of
        # the function arguments. Until then we need to build the dict
        # prior to the function call.
        kwargs = {
            'name': pool_name,
            'erasure_profile': profile_name,
            'weight': weight,
            'group': "images",
            'app_name': "rbd",
            'allow_ec_overwrites': True,
        }
        kwargs.update(bluestore_compression.get_kwargs())
        rq.add_op_create_erasure_pool(**kwargs)
    else:
        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
        # the unpacking of the BlueStore compression arguments as part of
        # the function arguments. Until then we need to build the dict
        # prior to the function call.
        kwargs = {
            'name': pool_name,
            'replica_count': replicas,
            'weight': weight,
            'group': 'images',
            'app_name': 'rbd',
        }
        kwargs.update(bluestore_compression.get_kwargs())
        rq.add_op_create_replicated_pool(**kwargs)

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name="images",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return rq
Example #42
def start_charm():
    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')
    service_name = hookenv.service_name()

    port = hookenv.config('port')

    layer.caas_base.pod_spec_set({
        'version':
        2,
        'serviceAccount': {
            'global':
            True,
            'rules': [
                {
                    'apiGroups': [''],
                    'resources': ['pods', 'pods/exec', 'pods/log'],
                    'verbs': ['get', 'list', 'watch'],
                },
                {
                    'apiGroups': [''],
                    'resources': ['secrets'],
                    'verbs': ['get']
                },
                {
                    'apiGroups': ['argoproj.io'],
                    'resources': ['workflows', 'workflows/finalizers'],
                    'verbs': ['get', 'list', 'watch'],
                },
            ],
        },
        'service': {
            'annotations': {
                'getambassador.io/config':
                yaml.dump_all([{
                    'apiVersion': 'ambassador/v0',
                    'kind': 'Mapping',
                    'name': 'argo-ui',
                    'prefix': '/argo/',
                    'service': f'{service_name}:{port}',
                    'timeout_ms': 30000,
                }])
            }
        },
        'containers': [{
            'name': 'argo-ui',
            'imageDetails': {
                'imagePath': image_info.registry_path,
                'username': image_info.username,
                'password': image_info.password,
            },
            'config': {
                'ARGO_NAMESPACE': os.environ['JUJU_MODEL_NAME'],
                'IN_CLUSTER': 'true',
                'BASE_HREF': '/argo/',
            },
            'ports': [{
                'name': 'http-ui',
                'containerPort': port
            }],
            'kubernetes': {
                'readinessProbe': {
                    'httpGet': {
                        'path': '/',
                        'port': port
                    }
                }
            },
        }],
    })

    layer.status.maintenance('creating container')
    set_flag('charm.started')
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    config = hookenv.config()

    # need to test this in case we get in
    # here from a config change to the image
    if not config.get('ingress'):
        return

    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    context['defaultbackend_image'] = config.get('default-backend-image')
    if (context['defaultbackend_image'] == "" or
       context['defaultbackend_image'] == "auto"):
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-s390x:1.4"
        elif context['arch'] == 'arm64':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-arm64:1.4"
        else:
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend:1.4"

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress daemon set controller manifest
    context['ssl_chain_completion'] = config.get(
        'ingress-ssl-chain-completion')
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        images = {'amd64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.16.1', # noqa
                  'arm64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-arm64:0.16.1', # noqa
                  's390x': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-s390x:0.16.1', # noqa
                  'ppc64el': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-ppc64le:0.16.1', # noqa
                  }
        context['ingress_image'] = images.get(context['arch'], images['amd64'])
    if get_version('kubelet') < (1, 9):
        context['daemonset_api_version'] = 'extensions/v1beta1'
    else:
        context['daemonset_api_version'] = 'apps/v1beta2'
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
Example #44
def start_charm():
    layer.status.maintenance("configuring container")

    namespace = os.environ["JUJU_MODEL_NAME"]
    config = dict(hookenv.config())

    service_mesh = endpoint_from_name('service-mesh')
    routes = service_mesh.routes()

    try:
        auth_route = next(r for r in routes if r['auth'])
    except StopIteration:
        auth_route = None

    # See https://bugs.launchpad.net/juju/+bug/1900475 for why this isn't inlined below
    if routes:
        custom_resources = {
            'gateways.networking.istio.io': [
                {
                    'apiVersion': 'networking.istio.io/v1beta1',
                    'kind': 'Gateway',
                    'metadata': {
                        'name': config['default-gateway'],
                    },
                    'spec': {
                        'selector': {'istio': 'ingressgateway'},
                        'servers': [
                            {
                                'hosts': ['*'],
                                'port': {'name': 'http', 'number': 80, 'protocol': 'HTTP'},
                            }
                        ],
                    },
                }
            ],
            'virtualservices.networking.istio.io': [
                {
                    'apiVersion': 'networking.istio.io/v1alpha3',
                    'kind': 'VirtualService',
                    'metadata': {'name': route['service']},
                    'spec': {
                        'gateways': [f'{namespace}/{config["default-gateway"]}'],
                        'hosts': ['*'],
                        'http': [
                            {
                                'match': [{'uri': {'prefix': route['prefix']}}],
                                'rewrite': {'uri': route['rewrite']},
                                'route': [
                                    {
                                        'destination': {
                                            'host': f'{route["service"]}.{namespace}.svc.cluster.local',
                                            'port': {'number': route['port']},
                                        }
                                    }
                                ],
                            }
                        ],
                    },
                }
                for route in service_mesh.routes()
            ],
        }
    else:
        custom_resources = {}

    if auth_route:
        request_headers = [{'exact': h} for h in auth_route['auth']['request_headers']]
        response_headers = [{'exact': h} for h in auth_route['auth']['response_headers']]
        custom_resources['rbacconfigs.rbac.istio.io'] = [
            {
                'apiVersion': 'rbac.istio.io/v1alpha1',
                'kind': 'RbacConfig',
                'metadata': {'name': 'default'},
                'spec': {'mode': 'OFF'},
            }
        ]
        custom_resources['envoyfilters.networking.istio.io'] = [
            {
                'apiVersion': 'networking.istio.io/v1alpha3',
                'kind': 'EnvoyFilter',
                'metadata': {'name': 'authn-filter'},
                'spec': {
                    'filters': [
                        {
                            'filterConfig': {
                                'httpService': {
                                    'authorizationRequest': {
                                        'allowedHeaders': {
                                            'patterns': request_headers,
                                        }
                                    },
                                    'authorizationResponse': {
                                        'allowedUpstreamHeaders': {
                                            'patterns': response_headers,
                                        },
                                    },
                                    'serverUri': {
                                        'cluster': f'outbound|{auth_route["port"]}||{auth_route["service"]}.{namespace}.svc.cluster.local',
                                        'failureModeAllow': False,
                                        'timeout': '10s',
                                        'uri': f'http://{auth_route["service"]}.{namespace}.svc.cluster.local:{auth_route["port"]}',
                                    },
                                }
                            },
                            'filterName': 'envoy.ext_authz',
                            'filterType': 'HTTP',
                            'insertPosition': {'index': 'FIRST'},
                            'listenerMatch': {'listenerType': 'GATEWAY'},
                        }
                    ],
                    'workloadLabels': {
                        'istio': 'ingressgateway',
                    },
                },
            }
        ]

    image = layer.docker_resource.get_info("oci-image")
    tconfig = {k.replace('-', '_'): v for k, v in config.items()}
    tconfig['service_name'] = hookenv.service_name()
    tconfig['namespace'] = namespace
    env = Environment(
        loader=FileSystemLoader('templates'), variable_start_string='[[', variable_end_string=']]'
    )

    layer.caas_base.pod_spec_set(
        {
            "version": 3,
            "serviceAccount": {
                "roles": [
                    {
                        "global": True,
                        "rules": [
                            {"apiGroups": ["*"], "resources": ["*"], "verbs": ["*"]},
                            {"nonResourceURLs": ["*"], "verbs": ["*"]},
                        ],
                    }
                ]
            },
            "containers": [
                {
                    "name": "discovery",
                    "args": [
                        "discovery",
                        f"--monitoringAddr={config['monitoring-address']}",
                        "--log_output_level=all:debug",
                        "--domain",
                        "cluster.local",
                        f"--secureGrpcAddr={config['secure-grpc-address']}",
                        "--trust-domain=cluster.local",
                        "--keepaliveMaxServerConnectionAge",
                        "30m",
                        "--disable-install-crds=true",
                    ],
                    "imageDetails": {
                        "imagePath": image.registry_path,
                        "username": image.username,
                        "password": image.password,
                    },
                    "envConfig": {
                        "JWT_POLICY": "first-party-jwt",
                        "PILOT_CERT_PROVIDER": "istiod",
                        "POD_NAME": {"field": {"path": "metadata.name", "api-version": "v1"}},
                        "POD_NAMESPACE": namespace,
                        "SERVICE_ACCOUNT": {
                            "field": {"path": "spec.serviceAccountName", "api-version": "v1"}
                        },
                        "PILOT_TRACE_SAMPLING": "1",
                        "CONFIG_NAMESPACE": "istio-config",
                        "PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_OUTBOUND": "true",
                        "PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_INBOUND": "false",
                        "INJECTION_WEBHOOK_CONFIG_NAME": f"{namespace}-sidecar-injector",
                        "ISTIOD_ADDR": f"{hookenv.service_name()}.{namespace}.svc:{config['xds-ca-tls-port']}",
                        "PILOT_EXTERNAL_GALLEY": "false",
                    },
                    "ports": [
                        {"name": "debug", "containerPort": config['debug-port']},
                        {"name": "grpc-xds", "containerPort": config['xds-ca-port']},
                        {"name": "xds", "containerPort": config["xds-ca-tls-port"]},
                        {"name": "webhook", "containerPort": config['webhook-port']},
                    ],
                    "kubernetes": {
                        "readinessProbe": {
                            "failureThreshold": 3,
                            "httpGet": {
                                "path": "/ready",
                                "port": config['debug-port'],
                                "scheme": "HTTP",
                            },
                            "initialDelaySeconds": 5,
                            "periodSeconds": 5,
                            "successThreshold": 1,
                            "timeoutSeconds": 5,
                        },
                    },
                    "volumeConfig": [
                        {
                            "name": "config-volume",
                            "mountPath": "/etc/istio/config",
                            "files": [
                                {
                                    "path": "mesh",
                                    "content": env.get_template('mesh').render(tconfig),
                                },
                                {
                                    "path": "meshNetworks",
                                    "content": env.get_template('meshNetworks').render(tconfig),
                                },
                                {
                                    "path": "values.yaml",
                                    "content": env.get_template('values.yaml').render(tconfig),
                                },
                            ],
                        },
                        {
                            "name": "local-certs",
                            "mountPath": "/var/run/secrets/istio-dns",
                            "emptyDir": {"medium": "Memory"},
                        },
                        {
                            "name": "inject",
                            "mountPath": "/var/lib/istio/inject",
                            "files": [
                                {
                                    "path": "config",
                                    "content": env.get_template('config').render(tconfig),
                                },
                                {
                                    "path": "values",
                                    "content": env.get_template('values').render(tconfig),
                                },
                            ],
                        },
                    ],
                },
            ],
        },
        k8s_resources={
            "kubernetesResources": {
                "customResourceDefinitions": [
                    {"name": crd["metadata"]["name"], "spec": crd["spec"]}
                    for crd in yaml.safe_load_all(Path("files/crds.yaml").read_text())
                ],
                'customResources': custom_resources,
                "mutatingWebhookConfigurations": [
                    {
                        "name": "sidecar-injector",
                        "webhooks": [
                            {
                                "name": "sidecar-injector.istio.io",
                                "clientConfig": {
                                    "service": {
                                        "name": hookenv.service_name(),
                                        "namespace": namespace,
                                        "path": "/inject",
                                        "port": config['webhook-port'],
                                    },
                                    #  "caBundle": ca_bundle,
                                },
                                "rules": [
                                    {
                                        "operations": ["CREATE"],
                                        "apiGroups": [""],
                                        "apiVersions": ["v1"],
                                        "resources": ["pods"],
                                    }
                                ],
                                "failurePolicy": "Fail",
                                "namespaceSelector": {"matchLabels": {"juju-model": namespace}},
                                "objectSelector": {
                                    "matchExpressions": [
                                        {
                                            'key': 'juju-app',
                                            'operator': 'In',
                                            'values': ['nonexistent']
                                            + [route['service'] for route in service_mesh.routes()],
                                        },
                                    ],
                                },
                            }
                        ],
                    }
                ],
            }
        },
    )

    layer.status.maintenance("creating container")
    set_flag("charm.started")
Example #45
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    config = hookenv.config()

    # Need to check this in case we got here via a config change to the image.
    if not config.get('ingress'):
        return

    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    context['defaultbackend_image'] = config.get('default-backend-image')
    if (context['defaultbackend_image'] == ""
            or context['defaultbackend_image'] == "auto"):
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "gcr.io/google_containers/defaultbackend-s390x:1.4"
        else:
            context['defaultbackend_image'] = \
                "gcr.io/google_containers/defaultbackend:1.4"

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            'Failed to create default-http-backend. Will attempt again next update.'
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress daemon set controller manifest
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        if context['arch'] == 's390x':
            context['ingress_image'] = \
                "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
        else:
            context['ingress_image'] = \
                "gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15" # noqa
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            'Failed to create ingress controller. Will attempt again next update.'
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
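
The kubectl() helper used above is not shown here; it is assumed to be a thin subprocess wrapper that raises CalledProcessError on failure, which is what the try/except blocks rely on. A sketch under that assumption (the kubeconfig path is hypothetical):

from subprocess import check_output

def kubectl(*args):
    """Run kubectl with the given arguments and return its output.

    Raises CalledProcessError on a non-zero exit, so callers can catch it
    and retry on the next update.
    """
    cmd = ['kubectl', '--kubeconfig=/root/.kube/config'] + list(args)
    return check_output(cmd)
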
Example #46
def start_charm():
    layer.status.maintenance("configuring container")

    image_info = layer.docker_resource.get_info("oci-image")

    service_name = hookenv.service_name()
    connectors = yaml.safe_load(hookenv.config("connectors"))
    namespace = os.environ["JUJU_MODEL_NAME"]
    port = hookenv.config("port")
    public_url = hookenv.config("public-url")

    oidc_client_info = endpoint_from_name('oidc-client').get_config()
    if not oidc_client_info:
        layer.status.blocked("No OIDC client information found")
        return False

    # Allows setting a basic username/password combo
    static_username = hookenv.config("static-username")
    static_password = hookenv.config("static-password")

    static_config = {}

    if static_username:
        if not static_password:
            layer.status.blocked(
                'Static password is required when static username is set')
            return False

        salt = bcrypt.gensalt()
        hashed = bcrypt.hashpw(static_password.encode('utf-8'),
                               salt).decode('utf-8')
        static_config = {
            'enablePasswordDB':
            True,
            'staticPasswords': [{
                'email': static_username,
                'hash': hashed,
                'username': static_username,
                'userID': str(uuid4()),
            }],
        }

    config = yaml.dump({
        "issuer": f"{public_url}/dex",
        "storage": {
            "type": "kubernetes",
            "config": {
                "inCluster": True
            }
        },
        "web": {
            "http": f"0.0.0.0:{port}"
        },
        "logger": {
            "level": "debug",
            "format": "text"
        },
        "oauth2": {
            "skipApprovalScreen": True
        },
        "staticClients": oidc_client_info,
        "connectors": connectors,
        **static_config,
    })

    # Kubernetes won't automatically restart the pod when the configmap changes,
    # so we embed a hash of the config in the pod name; that way the Deployment
    # spec changes whenever the config does.
    config_hash = sha256()
    config_hash.update(config.encode('utf-8'))
    pod_name = f"dex-auth-{config_hash.hexdigest()[:48]}"

    layer.caas_base.pod_spec_set(
        {
            "version":
            2,
            "serviceAccount": {
                "global":
                True,
                "rules": [
                    {
                        "apiGroups": ["dex.coreos.com"],
                        "resources": ["*"],
                        "verbs": ["*"]
                    },
                    {
                        "apiGroups": ["apiextensions.k8s.io"],
                        "resources": ["customresourcedefinitions"],
                        "verbs": ["create"],
                    },
                ],
            },
            "service": {
                "annotations": {
                    "getambassador.io/config":
                    yaml.dump_all([{
                        "apiVersion": "ambassador/v1",
                        "kind": "Mapping",
                        "name": "dex-auth",
                        "prefix": "/dex",
                        "rewrite": "/dex",
                        "service": f"{service_name}.{namespace}:{port}",
                        "timeout_ms": 30000,
                        "bypass_auth": True,
                    }])
                }
            },
            "containers": [{
                "name":
                pod_name,
                "imageDetails": {
                    "imagePath": image_info.registry_path,
                    "username": image_info.username,
                    "password": image_info.password,
                },
                "command": ["dex", "serve", "/etc/dex/cfg/config.yaml"],
                "ports": [{
                    "name": "http",
                    "containerPort": port
                }],
                "files": [{
                    "name": "config",
                    "mountPath": "/etc/dex/cfg",
                    "files": {
                        "config.yaml": config
                    },
                }],
            }],
        },
        {
            "kubernetesResources": {
                "customResourceDefinitions": {
                    crd["metadata"]["name"]: crd["spec"]
                    for crd in yaml.safe_load_all(
                        Path("resources/crds.yaml").read_text())
                }
            }
        },
    )

    layer.status.maintenance("creating container")
    set_flag("charm.started")
Example #47

def service_name(self):
    return hookenv.service_name()
Example #48
def get_db_helper():
    return MySQLHelper(rpasswdf_template='/var/lib/charm/%s/mysql.passwd' %
                       (service_name()),
                       upasswdf_template='/var/lib/charm/%s/mysql-{}.passwd' %
                       (service_name()))
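
Note the mix of formatting styles in the password-file templates above: the %s is filled immediately with the application name, while the literal {} is left in place for the helper to substitute later (presumably with the database username). A tiny illustration with made-up names:

# '%s' is resolved now; '{}' survives to be filled by a later .format() call.
template = '/var/lib/charm/%s/mysql-{}.passwd' % 'percona-cluster'
print(template)                  # /var/lib/charm/percona-cluster/mysql-{}.passwd
print(template.format('nova'))   # /var/lib/charm/percona-cluster/mysql-nova.passwd
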
Example #49
def install_hive(hadoop):
    '''
    Anytime our dependencies are available, check to see if we have a valid
    reason to (re)install. These include:
    - initial install
    - HBase has joined/departed
    '''
    # Hive cannot handle '-' in the metastore db name, and MySQL uses the
    # service name to name the db.
    if "-" in hookenv.service_name():
        hookenv.status_set('blocked', "application name may not contain '-'; "
                                      "redeploy with a different name")
        return

    # Get hbase connection dict if it's available
    if is_state('hbase.ready'):
        hbase = RelationBase.from_state('hbase.ready')
        hbserver = hbase.hbase_servers()[0]
    else:
        hbserver = None

    # Get zookeeper connection dict if it's available
    if is_state('zookeeper.ready'):
        zk = RelationBase.from_state('zookeeper.ready')
        zks = zk.zookeepers()
    else:
        zks = None

    # Use this to determine if we need to reinstall
    deployment_matrix = {
        'hbase': hbserver,
        'zookeepers': zks
    }

    # Handle nuances when installing versus re-installing
    if not is_state('hive.installed'):
        prefix = "installing"

        # On initial install, prime our kv with the current deployment matrix.
        # Subsequent calls will use this to determine if a reinstall is needed.
        data_changed('deployment_matrix', deployment_matrix)
    else:
        prefix = "configuring"

        # Return if our matrix has not changed
        if not data_changed('deployment_matrix', deployment_matrix):
            return

    hookenv.status_set('maintenance', '{} hive'.format(prefix))
    hookenv.log("{} hive with: {}".format(prefix, deployment_matrix))

    hive = Hive()
    hive.install(hbase=hbserver, zk_units=zks)
    hive.restart()
    hive.open_ports()
    set_state('hive.installed')
    report_status()

    # set app version string for juju status output
    hive_version = get_package_version('hive') or 'unknown'
    hookenv.application_version_set(hive_version)
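
data_changed() above comes from charms.reactive and returns True on the first call or whenever the stored hash of the value differs from the new one, which is why the install branch primes it and the reconfigure branch can return early. A simplified, self-contained stand-in showing the idea (the real helper persists its hashes in Juju unit data):

import hashlib
import json

_store = {}

def data_changed(key, data):
    # Hash the serialized value, compare against the previous hash, persist
    # the new one, and report whether anything changed.
    digest = hashlib.md5(json.dumps(data, sort_keys=True).encode()).hexdigest()
    changed = _store.get(key) != digest
    _store[key] = digest
    return changed

assert data_changed('deployment_matrix', {'hbase': None, 'zookeepers': None})
assert not data_changed('deployment_matrix', {'hbase': None, 'zookeepers': None})
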
Example #50
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance("configuring container")

    image_info = layer.docker_resource.get_info("oci-image")

    service_name = hookenv.service_name()
    namespace = os.environ["JUJU_MODEL_NAME"]
    public_url = hookenv.config("public-url")
    port = hookenv.config("port")
    oidc_scopes = hookenv.config("oidc-scopes")

    layer.caas_base.pod_spec_set({
        "version":
        2,
        "service": {
            "annotations": {
                "getambassador.io/config":
                yaml.dump_all([
                    {
                        "apiVersion": "ambassador/v1",
                        "kind": "Mapping",
                        "name": "oidc-gatekeeper",
                        "prefix": "/oidc",
                        "service": f"{service_name}.{namespace}:{port}",
                        "timeout_ms": 30000,
                        "bypass_auth": True,
                    },
                    {
                        "apiVersion": "ambassador/v1",
                        "kind": "AuthService",
                        "name": "oidc-gatekeeper-auth",
                        "auth_service": f"{service_name}.{namespace}:{port}",
                        "allowed_authorization_headers": ["kubeflow-userid"],
                    },
                ])
            }
        },
        "containers": [{
            "name": "oidc-gatekeeper",
            "imageDetails": {
                "imagePath": image_info.registry_path,
                "username": image_info.username,
                "password": image_info.password,
            },
            "ports": [{
                "name": "http",
                "containerPort": port
            }],
            "config": {
                "CLIENT_ID": hookenv.config('client-id'),
                "CLIENT_SECRET": hookenv.config("client-secret"),
                "DISABLE_USERINFO": True,
                "OIDC_PROVIDER": f"{public_url}/dex",
                "OIDC_SCOPES": oidc_scopes,
                "SERVER_PORT": port,
                "SELF_URL": f"{public_url}/oidc",
                "USERID_HEADER": "kubeflow-userid",
                "USERID_PREFIX": "",
                "STORE_PATH": "bolt.db",
                "REDIRECT_URL": f"{public_url}/oidc/login/oidc",
            },
        }],
    })

    layer.status.maintenance("creating container")
    set_flag("charm.started")
Example #51
def storage_ceph_connected(ceph):
    ceph_mds = reactive.endpoint_from_flag('ceph-mds.connected')
    ceph_mds.announce_mds_name()
    service = service_name()
    weight = config('ceph-pool-weight')
    replicas = config('ceph-osd-replication-count')

    if config('rbd-pool-name'):
        pool_name = config('rbd-pool-name')
    else:
        pool_name = "{}_data".format(service)

    # The '_' rather than '-' in the default pool name
    # maintains consistency with previous versions of the
    # charm but is inconsistent with ceph-client charms.
    metadata_pool_name = (config('metadata-pool')
                          or "{}_metadata".format(service))
    # Metadata sizing is approximately 20% of overall data weight
    # https://ceph.io/planet/cephfs-ideal-pg-ratio-between-metadata-and-data-pools/
    metadata_weight = weight * 0.20
    # Resize data pool weight to accommodate metadata weight
    weight = weight - metadata_weight
    extra_pools = []

    bluestore_compression = None
    with charm.provide_charm_instance() as cephfs_charm:
        # TODO: move this whole method into the charm class and add to the
        # common pool creation logic in charms.openstack. For now we reuse
        # the common bluestore compression wrapper here.
        try:
            bluestore_compression = cephfs_charm._get_bluestore_compression()
        except ValueError as e:
            ch_core.hookenv.log('Invalid value(s) provided for Ceph BlueStore '
                                'compression: "{}"'.format(str(e)))

    if config('pool-type') == 'erasure-coded':
        # General EC plugin config
        plugin = config('ec-profile-plugin')
        technique = config('ec-profile-technique')
        device_class = config('ec-profile-device-class')
        bdm_k = config('ec-profile-k')
        bdm_m = config('ec-profile-m')
        # LRC plugin config
        bdm_l = config('ec-profile-locality')
        crush_locality = config('ec-profile-crush-locality')
        # SHEC plugin config
        bdm_c = config('ec-profile-durability-estimator')
        # CLAY plugin config
        bdm_d = config('ec-profile-helper-chunks')
        scalar_mds = config('ec-profile-scalar-mds')
        # Weight for EC pool
        ec_pool_weight = config('ec-pool-weight')
        # Profile name
        profile_name = (config('ec-profile-name')
                        or "{}-profile".format(service))
        # Create erasure profile
        ceph_mds.create_erasure_profile(name=profile_name,
                                        k=bdm_k,
                                        m=bdm_m,
                                        lrc_locality=bdm_l,
                                        lrc_crush_locality=crush_locality,
                                        shec_durability_estimator=bdm_c,
                                        clay_helper_chunks=bdm_d,
                                        clay_scalar_mds=scalar_mds,
                                        device_class=device_class,
                                        erasure_type=plugin,
                                        erasure_technique=technique)

        # Create EC data pool
        ec_pool_name = 'ec_{}'.format(pool_name)

        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
        # the unpacking of the BlueStore compression arguments as part of
        # the function arguments. Until then we need to build the dict
        # prior to the function call.
        kwargs = {
            'name': ec_pool_name,
            'erasure_profile': profile_name,
            'weight': ec_pool_weight,
            'app_name': ceph_mds.ceph_pool_app_name,
            'allow_ec_overwrites': True,
        }
        if bluestore_compression:
            kwargs.update(bluestore_compression)
        ceph_mds.create_erasure_pool(**kwargs)

        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
        # the unpacking of the BlueStore compression arguments as part of
        # the function arguments. Until then we need to build the dict
        # prior to the function call.
        kwargs = {
            'name': pool_name,
            'weight': weight,
            'app_name': ceph_mds.ceph_pool_app_name,
        }
        if bluestore_compression:
            kwargs.update(bluestore_compression)
        ceph_mds.create_replicated_pool(**kwargs)
        ceph_mds.create_replicated_pool(name=metadata_pool_name,
                                        weight=metadata_weight,
                                        app_name=ceph_mds.ceph_pool_app_name)
        extra_pools = [ec_pool_name]
    else:
        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
        # the unpacking of the BlueStore compression arguments as part of
        # the function arguments. Until then we need to build the dict
        # prior to the function call.
        kwargs = {
            'name': pool_name,
            'replicas': replicas,
            'weight': weight,
            'app_name': ceph_mds.ceph_pool_app_name,
        }
        if bluestore_compression:
            kwargs.update(bluestore_compression)
        ceph_mds.create_replicated_pool(**kwargs)
        ceph_mds.create_replicated_pool(name=metadata_pool_name,
                                        replicas=replicas,
                                        weight=metadata_weight,
                                        app_name=ceph_mds.ceph_pool_app_name)
    ceph_mds.request_cephfs(service, extra_pools=extra_pools)
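
The NOTE comments above refer to splatting the optional BlueStore compression settings directly into the pool-creation calls instead of building a kwargs dict first. A generic, runnable illustration of that pattern (the function and option name are made up for the example):

def create_pool(name, weight, **extra):
    return {'name': name, 'weight': weight, **extra}

# bluestore_compression may be None, hence the `or {}` guard.
bluestore_compression = {'compression_mode': 'aggressive'}
print(create_pool('cephfs_data', 80, **(bluestore_compression or {})))
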
Example #52
def ceph_config_file():
    return CHARM_CEPH_CONF.format(service_name())
Example #53
def ssl_ca_file(self):
    return '/var/lib/charm/{}/rabbit-client-ca.pem'.format(
        hookenv.service_name())
Example #54
def caPath():
    return '/usr/local/share/ca-certificates/{}.crt'.format(
        hookenv.service_name())
Example #55
def render_and_launch_ingress():
    """Launch the Kubernetes ingress controller & default backend (404)"""
    config = hookenv.config()

    # Need to check this in case we got here via a config change to the image.
    if not config.get("ingress"):
        return

    context = {}
    context["arch"] = arch()
    addon_path = "/root/cdk/addons/{}"
    context["juju_application"] = hookenv.service_name()

    # If present, workers will get the ingress containers from the configured
    # registry. Otherwise, we'll set an appropriate upstream image registry.
    registry_location = get_registry_location()

    context["defaultbackend_image"] = config.get("default-backend-image")
    if (
        context["defaultbackend_image"] == ""
        or context["defaultbackend_image"] == "auto"
    ):
        if registry_location:
            backend_registry = registry_location
        else:
            backend_registry = "k8s.gcr.io"
        if context["arch"] == "s390x":
            context["defaultbackend_image"] = "{}/defaultbackend-s390x:1.4".format(
                backend_registry
            )
        elif context["arch"] == "ppc64el":
            context["defaultbackend_image"] = "{}/defaultbackend-ppc64le:1.5".format(
                backend_registry
            )
        else:
            context["defaultbackend_image"] = "{}/defaultbackend-{}:1.5".format(
                backend_registry, context["arch"]
            )

    # Render the ingress daemon set controller manifest
    context["ssl_chain_completion"] = config.get("ingress-ssl-chain-completion")
    context["enable_ssl_passthrough"] = config.get("ingress-ssl-passthrough")
    context["default_ssl_certificate_option"] = None
    if config.get("ingress-default-ssl-certificate") and config.get(
        "ingress-default-ssl-key"
    ):
        context["default_ssl_certificate"] = b64encode(
            config.get("ingress-default-ssl-certificate").encode("utf-8")
        ).decode("utf-8")
        context["default_ssl_key"] = b64encode(
            config.get("ingress-default-ssl-key").encode("utf-8")
        ).decode("utf-8")
        default_certificate_option = (
            "- --default-ssl-certificate=" "$(POD_NAMESPACE)/default-ssl-certificate"
        )
        context["default_ssl_certificate_option"] = default_certificate_option
    context["ingress_image"] = config.get("nginx-image")
    if context["ingress_image"] == "" or context["ingress_image"] == "auto":
        if context["arch"] == "ppc64el":
            # multi-arch image doesn't include ppc64le, have to use an older version
            image = "nginx-ingress-controller-ppc64le"
            context["ingress_uid"] = "33"
            context["ingress_image"] = "/".join(
                [
                    registry_location or "quay.io",
                    "kubernetes-ingress-controller/{}:0.20.0".format(image),
                ]
            )
        else:
            context["ingress_uid"] = "101"
            context["ingress_image"] = "/".join(
                [
                    registry_location or "us.gcr.io",
                    "k8s-artifacts-prod/ingress-nginx/controller:v1.2.0",
                ]
            )

    kubelet_version = get_version("kubelet")
    if kubelet_version < (1, 9):
        context["daemonset_api_version"] = "extensions/v1beta1"
        context["deployment_api_version"] = "extensions/v1beta1"
    elif kubelet_version < (1, 16):
        context["daemonset_api_version"] = "apps/v1beta2"
        context["deployment_api_version"] = "extensions/v1beta1"
    else:
        context["daemonset_api_version"] = "apps/v1"
        context["deployment_api_version"] = "apps/v1"
    context["use_forwarded_headers"] = (
        "true" if config.get("ingress-use-forwarded-headers") else "false"
    )

    manifest = addon_path.format("ingress-daemon-set.yaml")
    render("ingress-daemon-set.yaml", manifest, context)
    hookenv.log("Creating the ingress daemon set.")
    try:
        kubectl("apply", "-f", manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            "Failed to create ingress controller. Will attempt again next update."
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the default http backend (404) deployment manifest
    # needs to happen after ingress-daemon-set since that sets up the namespace
    manifest = addon_path.format("default-http-backend.yaml")
    render("default-http-backend.yaml", manifest, context)
    hookenv.log("Creating the default http backend.")
    try:
        kubectl("apply", "-f", manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            "Failed to create default-http-backend. Will attempt again next update."
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state("kubernetes-worker.ingress.available")
    hookenv.open_port(80)
    hookenv.open_port(443)
Example #56
def start_charm():
    layer.status.maintenance('configuring container')

    config = hookenv.config()
    image_info = layer.docker_resource.get_info('pytorch-operator-image')

    crd = yaml.safe_load(Path('files/crd-v1beta1.yaml').read_text())

    conf_data = {}
    if config['pytorch-default-image']:
        conf_data['pytorchImage'] = config['pytorch-default-image']

    layer.caas_base.pod_spec_set({
        'containers': [
            {
                'name':
                'pytorch-operator',
                'imageDetails': {
                    'imagePath': image_info.registry_path,
                    'username': image_info.username,
                    'password': image_info.password,
                },
                'command': [
                    '/pytorch-operator.v1beta1',
                    '--alsologtostderr',
                    '-v=1',
                ],
                # Otherwise Juju sits at `waiting for container`
                'ports': [
                    {
                        'name': 'dummy',
                        'containerPort': 9999,
                    },
                ],
                'config': {
                    'MY_POD_NAMESPACE': os.environ['JUJU_MODEL_NAME'],
                    'MY_POD_NAME': hookenv.service_name(),
                },
                'files': [
                    {
                        'name': 'configs',
                        'mountPath': '/etc/config',
                        'files': {
                            'controller_config_file.yaml':
                            yaml.dump(conf_data),
                        },
                    },
                ],
            },
        ],
        # Backwards compatibility for juju < 2.5.4
        'customResourceDefinition': [{
            'group':
            crd['spec']['group'],
            'version':
            crd['spec']['version'],
            'scope':
            crd['spec']['scope'],
            'kind':
            crd['spec']['names']['kind'],
            'validation':
            crd['spec']['validation']['openAPIV3Schema']['properties']['spec'],
        }],
        'customResourceDefinitions': {
            crd['metadata']['name']: crd['spec'],
        },
    })

    layer.status.maintenance('creating container')
    set_flag('charm.kubeflow-pytorch-operator.started')
Example #57
def configure_mesh():
    endpoint_from_name('service-mesh').add_route(
        prefix='/argo/',
        rewrite='/',
        service=hookenv.service_name(),
        port=hookenv.config('port'))
Example #58
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')
    service_name = hookenv.service_name()

    port = hookenv.config('port')

    profiles = endpoint_from_name('kubeflow-profiles').services()[0]
    profiles_host = profiles['service_name']
    profiles_port = profiles['hosts'][0]['port']
    model = os.environ['JUJU_MODEL_NAME']

    layer.caas_base.pod_spec_set({
        'version':
        2,
        'serviceAccount': {
            'global':
            True,
            'rules': [
                {
                    'apiGroups': [''],
                    'resources': ['namespaces'],
                    'verbs': ['get', 'list', 'create', 'delete'],
                },
                {
                    'apiGroups': ['kubeflow.org'],
                    'resources': ['notebooks', 'poddefaults'],
                    'verbs': ['get', 'list', 'create', 'delete'],
                },
                {
                    'apiGroups': [''],
                    'resources': ['persistentvolumeclaims'],
                    'verbs': ['create', 'delete', 'get', 'list'],
                },
                {
                    'apiGroups': ['storage.k8s.io'],
                    'resources': ['storageclasses'],
                    'verbs': ['get', 'list', 'watch'],
                },
                {
                    'apiGroups': [''],
                    'resources': ['pods', 'pods/log', 'secrets', 'services'],
                    'verbs': ['*'],
                },
                {
                    'apiGroups': ['', 'apps', 'extensions'],
                    'resources': ['deployments', 'replicasets'],
                    'verbs': ['*'],
                },
                {
                    'apiGroups': ['kubeflow.org'],
                    'resources': ['*'],
                    'verbs': ['*']
                },
                {
                    'apiGroups': ['batch'],
                    'resources': ['jobs'],
                    'verbs': ['*']
                },
            ],
        },
        'service': {
            'annotations': {
                'getambassador.io/config':
                yaml.dump_all([{
                    'apiVersion': 'ambassador/v0',
                    'kind': 'Mapping',
                    'name': 'jupyter-web',
                    'prefix': '/jupyter/',
                    'service': f'{service_name}:{port}',
                    'timeout_ms': 30000,
                    'add_request_headers': {
                        'x-forwarded-prefix': '/jupyter'
                    },
                }])
            }
        },
        'containers': [{
            'name':
            'jupyter-web',
            'imageDetails': {
                'imagePath': image_info.registry_path,
                'username': image_info.username,
                'password': image_info.password,
            },
            'ports': [{
                'name': 'http',
                'containerPort': port
            }],
            'config': {
                'USERID_HEADER':
                'kubeflow-userid',
                'USERID_PREFIX':
                '',
                'KFAM':
                f'{profiles_host}.{model}.svc.cluster.local:{profiles_port}',
            },
            'files': [{
                'name': 'configs',
                'mountPath': '/etc/config',
                'files': {
                    Path(filename).name: Path(filename).read_text()
                    for filename in glob('files/*')
                },
            }],
        }],
    })

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Example #59
def ceph_broken():
    service = service_name()
    delete_keyring(service=service)
    CONFIGS.write_all()
Example #60
    def set_pod_spec(self, event):
        if not self.model.unit.is_leader():
            logger.info('Not a leader, skipping set_pod_spec')
            self.model.unit.status = ActiveStatus()
            return

        self.model.unit.status = MaintenanceStatus('Setting pod spec')

        try:
            image_details = self.image.fetch()
        except OCIImageResourceError as e:
            self.model.unit.status = e.status
            return

        model = os.environ['JUJU_MODEL_NAME']
        run(
            [
                "openssl",
                "req",
                "-x509",
                "-newkey",
                "rsa:4096",
                "-keyout",
                "key.pem",
                "-out",
                "cert.pem",
                "-days",
                "365",
                "-subj",
                f"/CN={hookenv.service_name()}.{model}.svc",
                "-nodes",
            ],
            check=True,
        )

        ca_bundle = b64encode(Path('cert.pem').read_bytes()).decode('utf-8')

        self.model.pod.set_spec(
            {
                'version':
                3,
                'serviceAccount': {
                    'roles': [{
                        'global':
                        True,
                        'rules': [
                            {
                                'apiGroups': ['kubeflow.org'],
                                'resources': ['poddefaults'],
                                'verbs': [
                                    'get',
                                    'list',
                                    'watch',
                                    'update',
                                    'create',
                                    'patch',
                                    'delete',
                                ],
                            },
                        ],
                    }],
                },
                'containers': [{
                    'name':
                    'admission-webhook',
                    'imageDetails':
                    image_details,
                    'ports': [{
                        'name': 'webhook',
                        'containerPort': 443
                    }],
                    'volumeConfig': [{
                        'name':
                        'certs',
                        'mountPath':
                        '/etc/webhook/certs',
                        'files': [
                            {
                                'path': 'cert.pem',
                                'content': Path('cert.pem').read_text()
                            },
                            {
                                'path': 'key.pem',
                                'content': Path('key.pem').read_text()
                            },
                        ],
                    }],
                }],
            },
            k8s_resources={
                'kubernetesResources': {
                    'customResourceDefinitions': [{
                        'name':
                        crd['metadata']['name'],
                        'spec':
                        crd['spec']
                    } for crd in yaml.safe_load_all(
                        Path("src/crds.yaml").read_text())],
                    'mutatingWebhookConfigurations': [{
                        'name':
                        'admission-webhook',
                        'webhooks': [
                            {
                                'name':
                                'admission-webhook.kubeflow.org',
                                'failurePolicy':
                                'Fail',
                                'clientConfig': {
                                    'caBundle': ca_bundle,
                                    'service': {
                                        'name': hookenv.service_name(),
                                        'namespace': model,
                                        'path': '/apply-poddefault',
                                    },
                                },
                                "objectSelector": {
                                    "matchExpressions": [
                                        {
                                            "key": "juju-app",
                                            "operator": "NotIn",
                                            "values": ["admission-webhook"],
                                        },
                                        {
                                            "key": "juju-operator",
                                            "operator": "NotIn",
                                            "values": ["admission-webhook"],
                                        },
                                    ]
                                },
                                'rules': [{
                                    'apiGroups': [''],
                                    'apiVersions': ['v1'],
                                    'operations': ['CREATE'],
                                    'resources': ['pods'],
                                }],
                            },
                        ],
                    }],
                }
            },
        )

        self.model.unit.status = ActiveStatus()
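
A quick way to sanity-check the certificate generated above is to confirm its subject CN matches the in-cluster DNS name the webhook clientConfig points at (<application>.<model>.svc). A small sketch using the same openssl binary; the printed names depend on your application and model:

from subprocess import run

result = run(
    ["openssl", "x509", "-noout", "-subject", "-in", "cert.pem"],
    check=True, capture_output=True, text=True,
)
# Expect something like: subject=CN = admission-webhook.kubeflow.svc
print(result.stdout.strip())
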