def ceph_access(rid=None, unit=None):
    '''Setup libvirt secret for specific ceph backend access'''
    ceph_key = relation_get('key', unit, rid)
    secret_uuid = relation_get('secret-uuid', unit, rid)
    # Only act when the hypervisor uses libvirt and the remote end has
    # published both credential pieces.
    if not (config('virt-type') in ['kvm', 'qemu', 'lxc'] and
            ceph_key and secret_uuid):
        return
    remote_service = remote_service_name(rid)
    secrets_filename = CEPH_BACKEND_SECRET.format(remote_service)
    render(os.path.basename(CEPH_SECRET), secrets_filename,
           context={'ceph_secret_uuid': secret_uuid,
                    'service_name': remote_service})
    create_libvirt_secret(secret_file=secrets_filename,
                          secret_uuid=secret_uuid,
                          key=ceph_key)
Ejemplo n.º 2
0
def ceph_access(rid=None, unit=None):
    '''Setup libvirt secret for specific ceph backend access'''
    ceph_key = relation_get('key', unit, rid)
    secret_uuid = relation_get('secret-uuid', unit, rid)
    virt_uses_libvirt = config('virt-type') in ['kvm', 'qemu', 'lxc']
    if virt_uses_libvirt and ceph_key and secret_uuid:
        service = remote_service_name(rid)
        target = CEPH_BACKEND_SECRET.format(service)
        template_name = os.path.basename(CEPH_SECRET)
        # Render the libvirt secret XML, then register it with libvirtd.
        render(template_name, target,
               context={'ceph_secret_uuid': secret_uuid,
                        'service_name': service})
        create_libvirt_secret(secret_file=target,
                              secret_uuid=secret_uuid,
                              key=ceph_key)
def ceph_access(rid=None, unit=None):
    '''Setup libvirt secret for specific ceph backend access'''

    def _install_backend(service_name, key, uuid):
        # Libvirt-based hypervisors need the secret registered with libvirt.
        if config('virt-type') in LIBVIRT_TYPES:
            secret_path = CEPH_BACKEND_SECRET.format(service_name)
            render(os.path.basename(CEPH_SECRET),
                   secret_path,
                   context={'ceph_secret_uuid': uuid,
                            'service_name': service_name})
            create_libvirt_secret(secret_file=secret_path,
                                  secret_uuid=uuid,
                                  key=key)
        # NOTE(jamespage): LXD ceph integration via host rbd mapping, so
        #                  install keyring for rbd commands to use
        ensure_ceph_keyring(service=service_name,
                            user='******',
                            group='nova',
                            key=key)

    keyrings_json = relation_get('keyrings')
    if keyrings_json:
        # Modern relation data: a JSON list of keyring descriptors.
        for entry in json.loads(keyrings_json):
            _install_backend(entry['name'], entry['key'],
                             entry['secret-uuid'])
    else:
        # NOTE: keep backwards compatibility with previous relation data
        key = relation_get('key', unit, rid)
        uuid = relation_get('secret-uuid', unit, rid)
        if key and uuid:
            _install_backend(remote_service_name(rid), key, uuid)
Ejemplo n.º 4
0
    def configure_remote_db(self, mysql):
        """Point hive at the related MySQL service.

        Writes the JDBC connection properties into hive-site.xml, pins the
        MySQL connector jar in hive-env.sh, and initialises the hive schema
        the first time each remote database is configured.
        """
        conf_dir = self.dist_config.path('hive_conf')
        hive_site = conf_dir / 'hive-site.xml'
        jdbc_url = (
            "jdbc:mysql://{}:{}/{}?createDatabaseIfNotExist=true"
            .format(mysql.host(), mysql.port(), mysql.database()))
        with utils.xmlpropmap_edit_in_place(hive_site) as props:
            props['javax.jdo.option.ConnectionURL'] = jdbc_url
            props['javax.jdo.option.ConnectionUserName'] = mysql.user()
            props['javax.jdo.option.ConnectionPassword'] = mysql.password()
            props['javax.jdo.option.ConnectionDriverName'] = (
                "com.mysql.jdbc.Driver")

        hive_env = conf_dir / 'hive-env.sh'
        utils.re_edit_in_place(
            hive_env, {
                r'.*export HIVE_AUX_JARS_PATH *=.*':
                ('export HIVE_AUX_JARS_PATH='
                 '/usr/share/java/mysql-connector-java.jar'),
            })

        # Now that we have db connection info, init our schema (only once)
        remote_db = hookenv.remote_service_name()
        schema_key = 'hive.schema.initialized.%s' % remote_db
        if not unitdata.kv().get(schema_key):
            schematool = "{}/bin/schematool".format(
                self.dist_config.path('hive'))
            utils.run_as('ubuntu', schematool, '-initSchema', '-dbType',
                         'mysql')
            unitdata.kv().set(schema_key, True)
            unitdata.kv().flush(True)
Ejemplo n.º 5
0
def create_repo(git):
    """Create a bare, group-shared git repository for the remote service.

    A dedicated system user restricted to git-shell owns the repository;
    the remote end's SSH public key is authorized for that user.
    """
    username = git.get_remote('username')
    service = remote_service_name()
    repo_path = os.path.join(repo_root(), service + '.git')

    # Account locked down to git-shell so it can only push/pull.
    host.add_group(username)
    host.adduser(username, password=host.pwgen(32), shell='/usr/bin/git-shell')

    public_key = git.get_remote('ssh-public-key')
    ssh_dir = '/home/{}/.ssh/'.format(username)
    host.mkdir(ssh_dir, username, username, 0o700)
    host.write_file(ssh_dir + 'authorized_keys',
                    public_key.encode('utf-8'),
                    username, username, 0o400)

    host.mkdir(repo_path, group=username, perms=0o770)
    subprocess.check_call(
        ['git', 'init', '--bare', '--shared=group', repo_path])

    # Create server-side hook that will inform
    # clients whenever changes are committed.
    create_git_hooks(repo_path, username)

    # Make the repo owned by <username>.
    chown_repo(repo_path, username)

    # TODO(axw) read and publish all host keys.
    ssh_host_keys = [open(SSH_HOST_RSA_KEY).read()]
    git.configure(repo_path, ssh_host_keys)
    set_state('git.repo.created')
    status_set('active', '')
Ejemplo n.º 6
0
def get_sandbox_image():
    '''Return the container image location for the sandbox_image.

    Set an appropriate sandbox image based on known registries. Precedence should be:
    - related docker-registry
    - default charmed k8s registry (if related to kubernetes)
    - upstream

    :return: str container image location
    '''
    db = unitdata.kv()
    canonical_registry = 'rocks.canonical.com:443/cdk'
    upstream_registry = 'k8s.gcr.io'

    configured_registry = db.get('registry', None)
    if configured_registry:
        # A related docker-registry always wins.
        sandbox_registry = configured_registry['url']
    else:
        try:
            deployment = hookenv.goal_state()
        except NotImplementedError:
            # Older controllers lack goal-state; fall back to relation ids.
            relations = [hookenv.remote_service_name(rid)
                         for rid in hookenv.relation_ids('containerd')]
        else:
            relations = deployment.get('relations', {}).get('containerd', {})

        k8s_related = any(
            k in relations
            for k in ('kubernetes-master', 'kubernetes-worker'))
        sandbox_registry = canonical_registry if k8s_related \
            else upstream_registry

    return '{}/pause-{}:3.1'.format(sandbox_registry, host.arch())
Ejemplo n.º 7
0
    def join(cls, scope):
        """
        Get or create a conversation for the given scope and active hook context.

        The current remote unit for the active hook context will be added to
        the conversation.

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
        """
        relation_name = hookenv.relation_type()
        relation_id = hookenv.relation_id()
        unit = hookenv.remote_unit()
        service = hookenv.remote_service_name()
        # UNIT/SERVICE scopes are namespaced per relation id; any other
        # scope is global to the relation type.
        if scope is scopes.UNIT:
            namespace, scope = relation_id, unit
        elif scope is scopes.SERVICE:
            namespace, scope = relation_id, service
        else:
            namespace = relation_name
        key = cls._key(namespace, scope)
        default = {'namespace': namespace, 'scope': scope, 'units': []}
        conversation = cls.deserialize(unitdata.kv().get(key, default))
        conversation.units.add(unit)
        unitdata.kv().set(key, cls.serialize(conversation))
        return conversation
Ejemplo n.º 8
0
    def conversation(self, scope=None):
        """
        Get a single conversation, by scope, that this relation is currently handling.

        If the scope is not given, the correct scope is inferred by the current
        hook execution context.  If there is no current hook execution context, it
        is assume that there is only a single global conversation scope for this
        relation.  If this relation's scope is not global and there is no current
        hook execution context, then an error is raised.
        """
        if scope is None:
            # Infer the scope from the current hook context.
            if self.scope is scopes.UNIT:
                scope = hookenv.remote_unit()
            elif self.scope is scopes.SERVICE:
                scope = hookenv.remote_service_name()
            else:
                scope = self.scope
        if scope is None:
            raise ValueError(
                'Unable to determine default scope: no current hook or global scope'
            )
        match = next((conv for conv in self._conversations
                      if conv.scope == scope), None)
        if match is None:
            raise ValueError("Conversation with scope '%s' not found" % scope)
        return match
Ejemplo n.º 9
0
def compose(scale_relation):
    """
    Start all of the Docker components. If the Compose manifest has changed the
    affected Docker containers will be recreated.

    :param scale_relation: Relation object for the charm that is going to be
                           autoscaled.
    :type scale_relation: JujuInfoClient
    """
    class ComposeException(Exception):
        pass

    try:
        related_ids = scale_relation.conversation().relation_ids

        # Exactly one scalable application may be related at a time.
        if len(related_ids) > 1:
            raise ComposeException("Cannot scale more than one application at "
                                   "the same time. Deploy more CharmScalers.")

        # This could happen if the state hasn't been updated yet but the
        # relation is removed.
        if not related_ids:
            raise ComposeException("Scalable charm relation was lost")

        application = hookenv.remote_service_name(related_ids[0])

        if _execute("compose_up", cfg, application, classinfo=DockerComponent,
                    pre_healthcheck=False):
            set_state("charmscaler.composed")
            return
    except ComposeException as err:
        msg = "Error while composing: {}".format(err)
        hookenv.status_set("blocked", msg)
        hookenv.log(msg, level=hookenv.ERROR)
Ejemplo n.º 10
0
    def configure_remote_db(self, mysql):
        """Wire hive up to the related MySQL database.

        Updates the JDBC settings in hive-site.xml, ensures hive-env.sh
        points at the MySQL connector jar, and runs schematool once per
        remote database.
        """
        hive_site = self.dist_config.path('hive_conf') / 'hive-site.xml'
        jdbc_url = (
            "jdbc:mysql://{}:{}/{}?createDatabaseIfNotExist=true"
            .format(mysql.host(), mysql.port(), mysql.database()))
        with utils.xmlpropmap_edit_in_place(hive_site) as props:
            props['javax.jdo.option.ConnectionURL'] = jdbc_url
            props['javax.jdo.option.ConnectionUserName'] = mysql.user()
            props['javax.jdo.option.ConnectionPassword'] = mysql.password()
            props['javax.jdo.option.ConnectionDriverName'] = (
                "com.mysql.jdbc.Driver")

        hive_env = self.dist_config.path('hive_conf') / 'hive-env.sh'
        utils.re_edit_in_place(hive_env, {
            r'.*export HIVE_AUX_JARS_PATH *=.*':
            ('export HIVE_AUX_JARS_PATH='
             '/usr/share/java/mysql-connector-java.jar'),
        })

        # Now that we have db connection info, init our schema (only once)
        remote_db = hookenv.remote_service_name()
        initialized_key = 'hive.schema.initialized.%s' % remote_db
        if not unitdata.kv().get(initialized_key):
            tool = "{}/bin/schematool".format(self.dist_config.path('hive'))
            utils.run_as(
                'ubuntu', tool, '-initSchema', '-dbType', 'mysql')
            unitdata.kv().set(initialized_key, True)
            unitdata.kv().flush(True)
Ejemplo n.º 11
0
    def join(cls, scope):
        """
        Get or create a conversation for the given scope and active hook context.

        The current remote unit for the active hook context will be added to
        the conversation.

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
        """
        relation_name = hookenv.relation_type()
        relation_id = hookenv.relation_id()
        unit = hookenv.remote_unit()
        service = hookenv.remote_service_name()
        # Resolve the storage namespace and concrete scope value: unit- and
        # service-scoped conversations live under the relation id; anything
        # else is global to the relation type.
        if scope is scopes.UNIT:
            namespace, scope = relation_id, unit
        elif scope is scopes.SERVICE:
            namespace, scope = relation_id, service
        else:
            namespace = relation_name
        key = cls._key(namespace, scope)
        store = unitdata.kv()
        stored = store.get(key, {'namespace': namespace,
                                 'scope': scope,
                                 'units': []})
        conversation = cls.deserialize(stored)
        conversation.units.add(unit)
        store.set(key, cls.serialize(conversation))
        return conversation
Ejemplo n.º 12
0
 def relation_ids(self):
     """
     The set of IDs of the specific relation instances that this conversation
     is communicating with.
     """
     # A unit name is "<service>/<n>"; collect the distinct service names.
     service_names = {unit.split('/')[0] for unit in self.units}
     return [rid for rid in hookenv.relation_ids(self.relation_name)
             if hookenv.remote_service_name(rid) in service_names]
Ejemplo n.º 13
0
 def relation_ids(self):
     """
     The set of IDs of the specific relation instances that this conversation
     is communicating with.
     """
     # Unit names look like "<service>/<n>"; keep only the service part.
     participating = {member.split('/')[0] for member in self.units}
     matching = []
     for rid in hookenv.relation_ids(self.relation_name):
         if hookenv.remote_service_name(rid) in participating:
             matching.append(rid)
     return matching
Ejemplo n.º 14
0
def get_client_application_name(relid, unit):
    """Retrieve client application name from relation data.

    Falls back to the remote unit of the current hook context when no unit
    is given, and to the remote service name when the relation data carries
    no explicit 'application-name'.

    :param relid: Relation ID
    :type relid: str
    :param unit: Remote unit name
    :type unit: str
    """
    unit = unit or remote_unit()
    rel_data = relation_get(rid=relid, unit=unit)
    return rel_data.get('application-name',
                        hookenv.remote_service_name(relid=relid))
Ejemplo n.º 15
0
def get_cert_relation_ca_name(cert_relation_id=None):
    """Determine CA certificate name as provided by relation.

    The filename on disk depends on the name chosen for the application on the
    providing end of the certificates relation.

    :param cert_relation_id: (Optional) Relation id providing the certs
    :type cert_relation_id: str
    :returns: CA certificate filename without path nor extension
    :rtype: str
    """
    if cert_relation_id is None:
        cert_relations = relation_ids('certificates')
        # No certificates relation at all -> no CA name to derive.
        if not cert_relations:
            return ''
        cert_relation_id = cert_relations[0]
    return '{}_juju_ca_cert'.format(
        remote_service_name(relid=cert_relation_id))
def ceph_access(rid=None, unit=None):
    '''Setup libvirt secret for specific ceph backend access'''
    ceph_key = relation_get('key', unit, rid)
    secret_uuid = relation_get('secret-uuid', unit, rid)
    # Nothing to do until the remote end publishes both credential pieces.
    if not (ceph_key and secret_uuid):
        return
    service = remote_service_name(rid)
    if config('virt-type') in LIBVIRT_TYPES:
        secret_path = CEPH_BACKEND_SECRET.format(service)
        render(os.path.basename(CEPH_SECRET), secret_path,
               context={'ceph_secret_uuid': secret_uuid,
                        'service_name': service})
        create_libvirt_secret(secret_file=secret_path,
                              secret_uuid=secret_uuid,
                              key=ceph_key)
    # NOTE(jamespage): LXD ceph integration via host rbd mapping, so
    #                  install keyring for rbd commands to use
    ensure_ceph_keyring(service=service,
                        user='******', group='nova',
                        key=ceph_key)
Ejemplo n.º 17
0
def ceph_access(rid=None, unit=None):
    '''Setup libvirt secret for specific ceph backend access'''
    access_key = relation_get('key', unit, rid)
    access_uuid = relation_get('secret-uuid', unit, rid)
    if access_key and access_uuid:
        backend_service = remote_service_name(rid)
        if config('virt-type') in LIBVIRT_TYPES:
            # Register the ceph secret with libvirt for this backend.
            secret_file = CEPH_BACKEND_SECRET.format(backend_service)
            secret_context = {'ceph_secret_uuid': access_uuid,
                              'service_name': backend_service}
            render(os.path.basename(CEPH_SECRET), secret_file,
                   context=secret_context)
            create_libvirt_secret(secret_file=secret_file,
                                  secret_uuid=access_uuid,
                                  key=access_key)
        # NOTE(jamespage): LXD ceph integration via host rbd mapping, so
        #                  install keyring for rbd commands to use
        ensure_ceph_keyring(service=backend_service,
                            user='******', group='nova',
                            key=access_key)
Ejemplo n.º 18
0
def _manage_ca_certs(ca, cert_relation_id):
    """Manage CA certs.

    :param ca: CA Certificate from certificate relation.
    :type ca: str
    :param cert_relation_id: Relation id providing the certs
    :type cert_relation_id: str
    """
    charm_ca = config('ssl_ca')
    charm_ca_path = '{}/{}.crt'.format(CA_CERT_DIR, CONFIG_CA_CERT_FILE)
    if charm_ca:
        log(
            "Installing CA certificate from charm ssl_ca config to {}".format(
                charm_ca_path), INFO)
        install_ca_cert(b64decode(charm_ca).rstrip(),
                        name=CONFIG_CA_CERT_FILE)
    elif os.path.exists(charm_ca_path):
        # Config no longer carries a CA: drop the stale on-disk copy.
        log("Removing CA certificate {}".format(charm_ca_path), INFO)
        os.remove(charm_ca_path)
    # The relation-provided CA is always (re)installed.
    log("Installing CA certificate from certificate relation", INFO)
    relation_ca_name = '{}_juju_ca_cert'.format(
        remote_service_name(relid=cert_relation_id))
    install_ca_cert(ca.encode(), name=relation_ca_name)
Ejemplo n.º 19
0
def client_relation(relid=None, unit=None):
    """Publish client credentials and process broker requests when ready."""
    if not ready_for_service():
        return
    log('mon cluster in quorum and osds bootstrapped '
        '- providing client with keys, processing broker requests')
    service_name = hookenv.remote_service_name(relid=relid)
    if not service_name:
        log('Unable to determine remote service name, deferring '
            'processing of broker requests')
        return
    public_addr = get_public_addr()
    data = {
        'key': ceph.get_named_key(service_name),
        'auth': config('auth-supported'),
        'ceph-public-address': public_addr,
    }
    rbd_features = get_rbd_features()
    if rbd_features:
        data['rbd-features'] = rbd_features
    unit = unit or remote_unit()
    data.update(
        handle_broker_request(relid, unit, add_legacy_response=True))
    relation_set(relation_id=relid, relation_settings=data)
Ejemplo n.º 20
0
    def conversation(self, scope=None):
        """
        Get a single conversation, by scope, that this relation is currently handling.

        If the scope is not given, the correct scope is inferred by the current
        hook execution context.  If there is no current hook execution context, it
        is assume that there is only a single global conversation scope for this
        relation.  If this relation's scope is not global and there is no current
        hook execution context, then an error is raised.
        """
        if scope is None:
            # Derive the scope from the relation's own scope setting.
            if self.scope is scopes.UNIT:
                scope = hookenv.remote_unit()
            elif self.scope is scopes.SERVICE:
                scope = hookenv.remote_service_name()
            else:
                scope = self.scope
        if scope is None:
            raise ValueError('Unable to determine default scope: no current hook or global scope')
        for candidate in self._conversations:
            if candidate.scope == scope:
                return candidate
        raise ValueError("Conversation with scope '%s' not found" % scope)