def update_reverseproxy_config():
    website = endpoint_from_flag('website.available')
    port = hookenv.config().get('registry-port')
    services_yaml = """
- service_name: %(app)s
  service_host: 0.0.0.0
  service_port: %(port)s
  service_options:
   - mode http
   - balance leastconn
   - option httpchk GET / HTTP/1.0
  servers:
   - [%(unit)s, %(addr)s, %(port)s, 'check port %(port)s']
""" % {
        'addr': hookenv.unit_private_ip(),
        'port': port,
        'app': hookenv.application_name(),
        'unit': hookenv.local_unit().replace('/', '-'),
    }
    website.configure(port=port)
    website.set_remote(all_services=services_yaml)

    # A proxy may change our netloc; if we have clients, tell them.
    netloc = layer.docker_registry.get_netloc()
    if (is_flag_set('charm.docker-registry.client-configured')
            and data_changed('proxy_netloc', netloc)):
        configure_client()
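For orientation, with a hypothetical unit docker-registry/0 at 10.0.0.5 and the default registry-port of 5000, the template above renders to roughly:

- service_name: docker-registry
  service_host: 0.0.0.0
  service_port: 5000
  service_options:
   - mode http
   - balance leastconn
   - option httpchk GET / HTTP/1.0
  servers:
   - [docker-registry-0, 10.0.0.5, 5000, 'check port 5000']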
Example #2
def run_container():
    '''
    Wrapper that launches a docker container under the direction of Juju
    and provides feedback/notifications to the end user.
    https://www.collaboraoffice.com/code/apache-reverse-proxy/

    The Collabora container takes some time to start.
    Test that it works with: curl -Ivk http://<collabora>:<port>
    -> OK
    '''

    # Collabora expects dots in the domain name to be escaped.
    # nextcloud_domain should be the domain of the related "nextcloud" unit.
    d = hookenv.config('nextcloud_domain').replace('.', '\\.')

    p = hookenv.config('port')

    hookenv.open_port(p)

    run_command = [
        "docker", "run", "-t", "-d", "--name",
        hookenv.application_name(), "-p", ":{}:9980".format(p), "-e",
        "domain={}".format(d), "--restart", "always", "--cap-add", "MKNOD",
        "collabora/code"
    ]
    check_call(run_command)

    clear_flag('collabora.stopped')

    set_flag('collabora.started')

    hookenv.status_set('waiting', 'container starting ...')
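Because the container takes a while to come up, a deployment script could poll the published port before treating the unit as ready. A minimal sketch using only the standard library (the handler above does not do this itself; host, port, and timeout are illustrative):

import time
import urllib.request


def wait_for_collabora(port, timeout=300):
    """Poll the Collabora endpoint until it answers or the timeout expires."""
    deadline = time.time() + timeout
    url = "http://localhost:{}/".format(port)
    while time.time() < deadline:
        try:
            urllib.request.urlopen(url, timeout=5)
            return True
        except OSError:
            # Not answering yet; wait and retry.
            time.sleep(10)
    return False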
Example #3
def install_omnivector_challenge():
    # If your charm has other dependencies that must be satisfied before it
    # can install, add those as @when() clauses above, or as additional
    # @when()-decorated handlers below.
    #
    # See the following for information about reactive charms:
    #
    #  * https://jujucharms.com/docs/devel/developer-getting-started
    #  * https://github.com/juju-solutions/layer-basic#overview
    #

    # Further reading:
    # - https://ubuntu.com/blog/charming-discourse-with-the-reactive-framework

    app = application_name()
    venv_root = "/srv/omnivector-challenge/venv"
    status_set("maintenance", "Creating Python virtualenv")
    check_call(['/usr/bin/python3', '-m', 'venv', venv_root])
    status_set("maintenance", "Installing Python requirements")
    check_call([f'{venv_root}/bin/pip', 'install', 'gunicorn'])
    check_call([
        f'{venv_root}/bin/pip', 'install', '-r',
        '/srv/omnivector-challenge/app/requirements.txt'
    ])
    set_state('omnivector_challenge.installed')
def install_ceph_common():
    """Install ceph-common tools.

    :return: None
    """
    ceph_client = endpoint_from_flag("ceph-client.available")

    ceph_context = {
        "mon_hosts": " ".join(ceph_client.mon_hosts()),
        "auth_supported": ceph_client.auth,
        "use_syslog": "true",
        "ceph_public_network": "",
        "ceph_cluster_network": "",
        "loglevel": 1,
        "hostname": socket.gethostname(),
    }
    # Install the ceph common utilities.
    apt_install(["ceph-common"], fatal=True)

    CEPH_CONF_DIR.mkdir(exist_ok=True, parents=True)
    # Render the ceph configuration from the ceph conf template.
    render("ceph.conf", str(CEPH_CONF), ceph_context)

    # The key can rotate independently of other ceph config, so always
    # (re)write the keyring.
    try:
        with open(str(CEPH_KEYRING), "w") as key_file:
            key_file.write(
                "[client.{}]\n\tkey = {}\n".format(
                    hookenv.application_name(), ceph_client.key
                )
            )
    except IOError as err:
        hookenv.log("IOError writing Ceph keyring: {}".format(err))
def update_vips():
    hacluster = endpoint_from_flag('ha.connected')
    config = hookenv.config()
    original_vips = set((config.previous('ha-cluster-vip') or '').split())
    new_vips = set(config['ha-cluster-vip'].split())
    old_vips = original_vips - new_vips

    for vip in old_vips:
        hacluster.remove_vip(hookenv.application_name(), vip)

    clear_flag('layer-hacluster.configured')
def send_k8s_request():
    if not config.get('fqdns'):
        status_set('blocked', 'Waiting for fqdns config')
        return
    if not config.get('nodeport'):
        status_set('blocked', 'Waiting for nodeport config')
        return
    endpoint = endpoint_from_flag('endpoint.kubernetes-deployer.available')
    context = {
        'name': application_name(),
        'fqdns': config.get('fqdns'),
        'nodeport': config.get('nodeport'),
    }
    resource = render('resource.yaml', None, context)
    endpoint.send_create_request([yaml.safe_load(resource)])
    set_flag('client.k8s-requested')
Example #7
def configure_mysql():
    mysql = endpoint_from_name('mysql')

    for i in range(len(mysql.relations)):
        mysql.provide_database(
            request_id=i,
            database_name=hookenv.config('database'),
            port=hookenv.config('port'),
            host=hookenv.application_name(),
            user='******',
            password=hookenv.config('root-password'),
        )
Example #8
def scrape_available(scrape):
    if hookenv.config().get("ingress"):
        scrape.configure(
            port=10254,
            labels=dict(
                juju_model=hookenv.model_name(),
                juju_model_uuid=hookenv.model_uuid(),
                juju_application=hookenv.application_name(),
                juju_unit=hookenv.local_unit(),
                service="nginx-ingress",
            ),
        )
def configure_hacluster():
    """Configure HA resources in corosync"""
    hacluster = endpoint_from_flag('ha.connected')
    vips = hookenv.config('ha-cluster-vip').split()
    dns_record = hookenv.config('ha-cluster-dns')
    if vips and dns_record:
        set_flag('layer-hacluster.dns_vip.invalid')
        msg = "Unsupported configuration. " \
              "ha-cluster-vip and ha-cluster-dns cannot both be set",
        hookenv.log(msg)
        return
    else:
        clear_flag('layer-hacluster.dns_vip.invalid')
    if vips:
        for vip in vips:
            hacluster.add_vip(hookenv.application_name(), vip)
    elif dns_record:
        layer_options = layer.options('hacluster')
        binding_address = layer_options.get('binding_address')
        ip = get_ingress_address(binding_address)
        hacluster.add_dnsha(hookenv.application_name(), ip, dns_record,
                            'public')

    services = db.get('layer-hacluster.services', {
        'current_services': {},
        'desired_services': {},
        'deleted_services': {}
    })
    for name, service in services['deleted_services'].items():
        hacluster.remove_systemd_service(name, service)
    for name, service in services['desired_services'].items():
        hacluster.add_systemd_service(name, service)
        services['current_services'][name] = service

    services['deleted_services'] = {}
    services['desired_services'] = {}

    hacluster.bind_resources()
    set_flag('layer-hacluster.configured')
Example #10
    def send_request_if_needed(self, request):
        """Send broker request if an equivalent request has not been sent

        @param request: A CephBrokerRq object
        """
        if is_request_sent(request, relation=self.relation_name):
            log('Request already sent but not complete, '
                'not sending new request')
        else:
            for relation in self.relations:
                relation.to_publish['broker_req'] = json.loads(request.request)
                relation.to_publish_raw['application-name'] = application_name()
                relation.to_publish_raw['unit-name'] = local_unit()
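A hedged usage sketch: a client handler might build a CephBrokerRq from charmhelpers and hand it to this method; the pool name and replica count are illustrative, not taken from the snippet above:

from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq
from charmhelpers.core import hookenv


def request_pool(ceph_client):
    # Ask the ceph broker for a replicated pool named after this application.
    rq = CephBrokerRq()
    rq.add_op_create_replicated_pool(name=hookenv.application_name(),
                                     replica_count=3)
    ceph_client.send_request_if_needed(rq)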
Example #11
def write_collabora_config():
    """Configure the docker container."""

    ctxt = {
        'domain': hookenv.config('nextcloud_domain'),
        'ssl_enable': str(hookenv.config('ssl_enable')).lower(),
        'ssl_termination': str(hookenv.config('ssl_termination')).lower()
    }

    render('loolwsd.xml', '/srv/loolwsd.xml', context=ctxt, perms=0o777)

    check_call([
        "docker", "cp", "/srv/loolwsd.xml",
        "{}:/etc/loolwsd/loolwsd.xml".format(hookenv.application_name())
    ])

    check_call(["docker", "restart", hookenv.application_name()])

    # Remove temporary config file
    check_call(["rm", "/srv/loolwsd.xml"])

    set_flag('collabora.configured')

    hookenv.status_set('waiting', 'configured & restarting.')
Example #12
def docker_inspect_state():
    """Get the container state from 'docker inspect'.

    Extracted "State" attributes include:
    "Status", "Running", "Paused", "Restarting", "OOMKilled",
    "Dead", "Pid", "ExitCode", "Error", "StartedAt", "FinishedAt"

    :return: dict of state attributes, or None on failure
    """

    cmd_line = "/usr/bin/docker inspect {}".format(hookenv.application_name())

    try:
        json_output = json.loads(
            subprocess.check_output(cmd_line, shell=True).decode('UTF-8'))
        return json_output[0]["State"]
    except (subprocess.CalledProcessError, ValueError, IndexError, KeyError):
        log("Failed running 'docker inspect {}'".format(
            hookenv.application_name()))
        return None
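Building on the helper above, a caller could decide whether the container needs to be (re)started. A minimal sketch:

def container_is_running():
    """Return True if 'docker inspect' reports the container as running."""
    state = docker_inspect_state()
    # docker_inspect_state() returns None when the inspect call fails,
    # e.g. because the container does not exist yet.
    if state is None:
        return False
    return state.get("Running", False)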
Example #13
def remove_deferred_restarts_check(nrpe):
    """
    Remove NRPE check for services with deferred service restarts.

    :param NRPE nrpe: NRPE object to remove check from
    """
    unit_name = local_unit().replace('/', '-')
    shortname = unit_name + '_deferred_restarts'
    check_cmd = 'check_deferred_restarts.py --application {}'.format(
        application_name())

    log('Removing deferred restarts nrpe check: {}'.format(shortname))
    nrpe.remove_check(
        shortname=shortname,
        description='Check deferred service restarts {}'.format(unit_name),
        check_cmd=check_cmd)
def make_pod_spec():
    """Make pod specification for Kubernetes

    Returns:
        pod_spec: Pod specification for Kubernetes
    """
    md = metadata()
    cfg = config()
    image_info = layer.docker_resource.get_info('ubuntu-image')
    with open('reactive/spec_template.yaml') as spec_file:
        pod_spec_template = spec_file.read()

    app_name = application_name()

    data = {
        'name': md.get('name'),
        'docker_image_path': image_info.registry_path,
        'docker_image_username': image_info.username,
        'docker_image_password': image_info.password,
        'application_name': app_name,
    }
    data.update(cfg)
    return pod_spec_template % data
Example #15
def _get_auth_basic():
    '''Process our basic auth configuration.

    When required config is present (or changes), write an htpasswd file
    and construct a valid auth dict. When config is missing, remove any
    existing htpasswd file.

    :return: dict of htpasswd auth data, or None
    '''
    charm_config = hookenv.config()
    password = charm_config.get('auth-basic-password')
    user = charm_config.get('auth-basic-user')

    auth = {}
    htpasswd_file = '/etc/docker/registry/htpasswd'
    if user and password:
        auth = {
            'realm': hookenv.application_name(),
            'path': htpasswd_file,
        }
        # Only write a new htpasswd if something changed
        if data_changed('basic_auth', '{}:{}'.format(user, password)):
            if _write_htpasswd(htpasswd_file, user, password):
                msg = 'Wrote new {}; htpasswd auth is available'.format(
                    htpasswd_file)
            else:
                msg = 'Failed to write {}; htpasswd auth is unavailable'.format(
                    htpasswd_file)
                _remove_if_exists(htpasswd_file)
        else:
            msg = 'htpasswd auth is available'
    else:
        msg = 'Missing config: htpasswd auth is unavailable'
        _remove_if_exists(htpasswd_file)

    hookenv.log(msg, level=hookenv.INFO)
    return auth if os.path.isfile(htpasswd_file) else None
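The _write_htpasswd helper used above is not included in this snippet. A minimal sketch of what such a helper could look like, assuming the bcrypt package is available (the docker registry expects bcrypt-hashed htpasswd entries); the charm's actual implementation may differ:

import bcrypt


def _write_htpasswd(path, user, password):
    """Write a single-entry bcrypt htpasswd file; return True on success."""
    try:
        hashed = bcrypt.hashpw(password.encode('utf-8'),
                               bcrypt.gensalt()).decode('utf-8')
        with open(path, 'w') as f:
            f.write('{}:{}\n'.format(user, hashed))
        return True
    except (IOError, OSError) as err:
        hookenv.log('Failed to write {}: {}'.format(path, err),
                    level=hookenv.ERROR)
        return False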
Example #16
    def to_publish_app(self):
        """
        This is the relation data that the local app publishes so it is
        visible to all related units. Use this to communicate with related
        apps. It is a writeable
        :class:`~charms.reactive.endpoints.JSONUnitDataView`.

        Only the leader can set the app-level relation data.

        All values stored in this collection will be automatically JSON
        encoded when they are published. This means that they need to be JSON
        serializable! Mappings stored in this collection will be encoded with
        sorted keys, to ensure that the encoded representation will only change
        if the actual data changes.

        Changes to this data are published at the end of a successful hook. The
        data is reset when a hook fails.
        """
        if self._app_data is None:
            # Using JSONUnitDataView even though its name includes "unit".
            self._app_data = JSONUnitDataView(
                hookenv.relation_get(app=hookenv.application_name(),
                                     rid=self.relation_id),
                writeable=True)
        return self._app_data
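A hedged usage sketch of the property above from a reactive handler: only the leader should write app-level data, and mappings are JSON-encoded on publish. The flag and key names are illustrative, and to_publish_app refers to the property defined here rather than a stock charms.reactive attribute:

from charms.reactive import when, endpoint_from_flag
from charmhelpers.core import hookenv


@when('endpoint.backend.joined')
def publish_app_config():
    if not hookenv.is_leader():
        # Only the leader may set app-level relation data.
        return
    endpoint = endpoint_from_flag('endpoint.backend.joined')
    for relation in endpoint.relations:
        # Dicts are fine here; they are JSON-encoded with sorted keys.
        relation.to_publish_app['config'] = {'port': 8080, 'tls': False}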
def ceph_cli(*args, timeout=60):
    cmd = ["ceph", "--user", hookenv.application_name()] + list(args)
    return check_output(cmd, timeout=timeout).decode("UTF-8")
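For example, listing pools through the wrapper (the subcommand is standard ceph CLI; output handling is illustrative):

pools = ceph_cli("osd", "lspools")
hookenv.log("Available pools: {}".format(pools.strip()))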
Example #18
def configure_http(http):
    http.configure(port=hookenv.config('port'),
                   hostname=hookenv.application_name())
Example #19
from charms.reactive import hook
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_not
from charms.reactive.helpers import data_changed

from charmhelpers.core import hookenv, unitdata

from pathlib import Path
from shlex import split

from subprocess import check_call
from subprocess import check_output

db = unitdata.kv()
HTTP_RELATION = "kubernetes-master"  # wokeignore:rule=master
APP = hookenv.application_name()
USER = "******"
certs_dir = Path("/srv/kubernetes")
ca_crt_path = certs_dir / "ca.crt"


@hook("upgrade-charm")
def reset_delivery_states():
    """Remove the state set when resources are unpacked."""
    install_snaps()


@when("kubernetes-e2e.installed")
def report_status():
    """Report the status of the charm."""
    messaging()
Example #20
    def __init__(self):
        self.iperf_out = '/home/ubuntu/iperf_output.' + \
            hookenv.application_name() + '.txt'
Example #21
from subprocess import check_call
from pathlib import Path
from os import environ

from charmhelpers.core.hookenv import log, application_name
from charms.reactive import not_unless

from charms.layer import options

_cfg = options.get("venv")

ENV_NAME = _cfg["env_name"] if _cfg["env_name"] else application_name()
log("ENV_NAME: {}".format(ENV_NAME))
ENV_DIR = Path("/opt/juju_venvs") / ENV_NAME
ENV_BIN = ENV_DIR / "bin"


@not_unless("venv.active")
def call_from_env(args):
    """
    Run command with arguments from inside the venv. Wait for command
    to complete. If the return code was zero then return, otherwise
    raise CalledProcessError. The CalledProcessError object will have
    the return code in the returncode attribute.
    """
    cmd = " ".join(args)
    log("Running {} from venv".format(cmd))
    check_call(". {}/activate; {}".format(ENV_BIN, cmd), shell=True)


def pip_install(package):
    # The original body is truncated in the source; a plausible completion
    # installs the package with the venv's pip via call_from_env().
    call_from_env(["pip", "install", package])
Example #22
def update_reverseproxy_config():
    '''Configure a reverse proxy.

    The lead unit is responsible for setting appropriate proxy config for all
    known registry peers. The oldest known peer will be configured as the
    primary proxied server. Other peers will be configured as backup servers
    which can take over if the primary fails.
    '''
    website = endpoint_from_flag('website.available')
    port = hookenv.config().get('registry-port')

    # Gather data about our peers, including ourself
    peers = peer_ips(peer_relation="peer")
    peers[hookenv.local_unit()] = hookenv.unit_private_ip()

    # Construct a list of server stanzas
    # NB: use oldest peer (the first unit name in our sorted peers list)
    # versus juju leadership to determine primary vs backup servers:
    #  https://bugs.launchpad.net/layer-docker-registry/+bug/1815459
    common_opts = "check inter 2000 rise 2 fall 5 maxconn 4096"
    is_primary = True
    tls_opts = ""
    if (is_flag_set('config.set.tls-cert-blob')
            and is_flag_set('config.set.tls-key-blob')):
        tls_ca_config = hookenv.config().get('tls-ca-path')
        tls_opts = ("ssl check-ssl crt /var/lib/haproxy/default.pem "
                    "ca-file %s verify required" % tls_ca_config)
    servers = []
    for unit in sorted(peers):
        if is_primary:
            server_opts = common_opts
            is_primary = False
        else:
            server_opts = common_opts + ' backup'
        server_opts = "{} {}".format(server_opts, tls_opts)
        servers.append('   - [{name}, {ip}, {port}, {opts}]'.format(
            name=unit.replace('/', '-'),
            ip=peers[unit],
            port=port,
            opts=server_opts))

    services_yaml = """
- service_name: %(app)s
  service_host: 0.0.0.0
  service_port: %(port)s
  service_options:
   - mode %(mode)s
   - balance leastconn
   - option httpchk GET / HTTP/1.0
  servers:
%(servers)s
""" % {
        'mode': 'tcp' if tls_opts != '' else 'http',
        'app': hookenv.application_name(),
        'port': port,
        'servers': "\n".join(servers),
    }
    # Send yaml to the proxy on initial relation and when it changes.
    if data_changed('proxy_stanza', services_yaml):
        # NB: interface needs configure() to set ip/host/port data and
        # set_remote for the blob of services.
        website.configure(port=port)
        website.set_remote(services=services_yaml)

    # A proxy may change our netloc; if we have clients, tell them.
    netloc = layer.docker_registry.get_netloc()
    if (is_flag_set('charm.docker-registry.client-configured')
            and data_changed('proxy_netloc', netloc)):
        configure_client()

    # Early versions of this charm incorrectly set an 'all_services'
    # key on the website relation. Kill it.
    if not is_flag_set('charm.docker-registry.proxy-data.validated'):
        website.set_remote(all_services=None)
        set_flag('charm.docker-registry.proxy-data.validated')

    # Ensure we'll validate website relation data from a follower perspective
    # if we ever lose leadership.
    clear_flag('charm.docker-registry.proxy-follower.validated')
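For orientation, with two hypothetical peers docker-registry/0 (10.0.0.5, oldest and therefore primary) and docker-registry/1 (10.0.0.6, backup), no TLS options, and the default port 5000, the loop above emits server stanzas roughly like:

   - [docker-registry-0, 10.0.0.5, 5000, check inter 2000 rise 2 fall 5 maxconn 4096]
   - [docker-registry-1, 10.0.0.6, 5000, check inter 2000 rise 2 fall 5 maxconn 4096 backup]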
Example #23
def make_pod_spec():
    """Generate the pod spec.
    """

    md = metadata()
    cfg = config()

    image_info = layer.docker_resource.get_info(ECK_OPERATOR)

    roles = get_roles_from_yaml()
    operator = get_operator_from_yaml()
    operator_namespace = model_name()
    operator_roles = cfg.get('operator-roles')
    service_account_name = application_name()
    service_name = md.get('name')

    spec = operator['spec']['template']['spec']
    container = spec['containers'][0]
    operator_ports = container['ports']
    operator_resources = container['resources']
    termination_grace_period_seconds = spec['terminationGracePeriodSeconds']

    pod_spec = {
        'version': 2,
        'containers': [
            {
                'name': service_name,
                'args': [
                    "manager",
                    "--operator-roles",
                    operator_roles,
                    "--operator-namespace",
                    operator_namespace,
                    "--enable-debug-logs=false",
                ],
                'imageDetails': {
                    'imagePath': image_info.registry_path,
                    'username': image_info.username,
                    'password': image_info.password,
                },
                'imagePullPolicy': "IfNotPresent",
                'config': {
                    'OPERATOR_NAMESPACE': operator_namespace,
                    'WEBHOOK_SECRET': "webhook-server-secret",
                    'WEBHOOK_PODS_LABEL': "elastic-operator",
                    'OPERATOR_IMAGE': image_info.registry_path,
                },
                'resources': operator_resources,
                'ports': operator_ports,
                'terminationMessagePath': "/dev/termination-log",
                'terminationMessagePolicy': "File",
            },
        ],
        'dnsPolicy': "ClusterFirst",
        'restartPolicy': "Always",
        'serviceAccountName': service_account_name,
        'serviceAccountRoles': {
            'automountServiceAccountToken': True,
            'rules': roles,
        },
        'terminationGracePeriodSeconds': termination_grace_period_seconds,
    }
    return pod_spec
Example #24
def _get_secret_backend():
    app_name = hookenv.application_name()
    return "charm-{}".format(app_name)