Example #1
def make_pod_spec():
    with open('reactive/spec_template.yaml') as spec_file:
        pod_spec_template = spec_file.read()

    md = metadata()
    cfg = config()

    user = cfg.get('user')
    set_flag('user', user)
    password = cfg.get('password')
    set_flag('password', password)
    database = cfg.get('database')
    set_flag('database', database)
    root_password = cfg.get('root_password')
    set_flag('root_password', root_password)

    data = {
        'name': md.get('name'),
        'image': cfg.get('mysql_image'),
        'port': cfg.get('mysql_port'),
        'user': user,
        'password': password,
        'database': database,
        'root_password': root_password,
    }
    data.update(cfg)
    return pod_spec_template % data
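The pod_spec_template % data call relies on old-style %(key)s placeholders in the template file, which is why every key referenced by the template must be present in data (hence data.update(cfg)). A minimal sketch of that substitution, using a hypothetical stand-in for reactive/spec_template.yaml:

# Sketch only: the template text below is a hypothetical stand-in for
# reactive/spec_template.yaml; the real file is not shown in this example.
pod_spec_template = """
containers:
  - name: %(name)s
    image: %(image)s
    ports:
      - containerPort: %(port)s
"""

data = {'name': 'mysql', 'image': 'mysql:5.7', 'port': 3306}
print(pod_spec_template % data)

Because %-formatting is used, a literal percent sign in the template has to be written as %%, and a missing key raises KeyError.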
Example #2
def make_pod_spec():
    with open('reactive/spec_template.yaml') as spec_file:
        pod_spec_template = spec_file.read()

    md = metadata()
    cfg = config()

    user = cfg.get('user')
    set_flag('user', user)
    password = cfg.get('password')
    set_flag('password', password)
    database = cfg.get('database')
    set_flag('database', database)
    root_password = cfg.get('root_password')
    set_flag('root_password', root_password)

    image_info = layer.docker_resource.get_info('mysql_image')

    data = {
        'name': md.get('name'),
        'docker_image_path': image_info.registry_path,
        'docker_image_username': image_info.username,
        'docker_image_password': image_info.password,
        'port': cfg.get('mysql_port'),
        'user': user,
        'password': password,
        'database': database,
        'root_password': root_password,
    }
    data.update(cfg)
    return pod_spec_template % data
Example #3
def init_relays():
    md = hookenv.metadata()
    for role in ('provides', 'requires'):
        for endpoint_name, endpoint_info in md.get(role, {}).items():
            if endpoint_info and endpoint_info.get('relay'):
                create_relay_hooks(endpoint_name, endpoint_info, endpoint_info.get('relay'), role=role)
    set_state('relay.init')
Example #4
def get_remote_unit_name():
    for rel_type in hookenv.metadata()['requires'].keys():
        rels = hookenv.relations_of_type(rel_type)
        if rels and len(rels) >= 1:
            rel = rels[0]
            if rel['private-address'] == hookenv.unit_private_ip():
                return rel['__unit__']
Example #5
def make_pod_spec(kafka_host, kafka_port, mongo_uri):
    """Make pod specification for Kubernetes

    Args:
        kafka_host (str): Kafka hostname or IP
        kafka_port (int): Kafka port
        mongo_uri (str): MongoDB connection URI
    Returns:
        pod_spec: Pod specification for Kubernetes
    """

    with open("reactive/spec_template.yaml") as spec_file:
        pod_spec_template = spec_file.read()

    md = metadata()
    cfg = config()
    data = {
        "name": md.get("name"),
        "docker_image": cfg.get("image"),
        "kafka_host": kafka_host,
        "kafka_port": kafka_port,
        "mongo_uri": mongo_uri,
    }
    data.update(cfg)
    return pod_spec_template % data
Example #6
def make_pod_spec(
    mysql_host, mysql_port, mysql_user, mysql_password, mysql_root_password
):
    """Make pod specification for Kubernetes

    Args:
        mysql_host (str): RO DB host
        mysql_port (int): RO DB port
        mysql_user (str): RO DB user
        mysql_password (str): RO DB password
        mysql_root_password (str): RO DB root password
    Returns:
        pod_spec: Pod specification for Kubernetes
    """

    with open("reactive/spec_template.yaml") as spec_file:
        pod_spec_template = spec_file.read()

    md = metadata()
    cfg = config()

    data = {
        "name": md.get("name"),
        "docker_image": cfg.get("image"),
        "mysql_host": mysql_host,
        "mysql_port": mysql_port,
        "mysql_user": mysql_user,
        "mysql_password": mysql_password,
        "mysql_root_password": mysql_root_password,
    }
    data.update(cfg)

    return pod_spec_template % data
Example #7
def make_pod_spec():

    def load_template(name, path=None):
        """ load template file
        :param str name: name of template file
        :param str path: alternate template location (unused here)
        """
        env = Environment(
            loader=FileSystemLoader(os.path.join(charm_dir(), 'templates')))
        return env.get_template(name)

    md = metadata()

    image_info = layer.docker_resource.get_info('bitcoind_image')

    data = {
        'env': {},
        'name': md.get('name'),
        'docker_image_path': image_info.registry_path,
        'docker_image_username': image_info.username,
        'docker_image_password': image_info.password,
    }
    for key, val in config().items():
        if key.startswith("btc"):
            data['env'][key.upper().replace("-", "_")] = val

    app_yml = load_template('spec_template.yaml')
    app_yml = app_yml.render(data=data)

    return app_yml
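Here the spec is rendered with Jinja2 rather than %-formatting, and the template receives the whole dict as data. A minimal sketch of the same render step with an in-memory template; the template text and values are illustrative, not the charm's real spec_template.yaml:

# Sketch only: equivalent of load_template(...).render(data=data) with a
# hypothetical inline template instead of templates/spec_template.yaml.
from jinja2 import Template

template_src = """
containers:
  - name: {{ data.name }}
    image: {{ data.docker_image_path }}
    env:
{% for key, val in data.env.items() %}
      - name: {{ key }}
        value: "{{ val }}"
{% endfor %}
"""

app_yml = Template(template_src).render(data={
    'name': 'bitcoind',
    'docker_image_path': 'registry.example.com/bitcoind:latest',
    'env': {'BTC_RPCUSER': 'rpcuser'},
})
print(app_yml)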
Example #8
def make_pod_spec(wp_db_host, wp_db_user, wp_db_password):
    """Make pod specification for Kubernetes

    Returns:
        pod_spec: Pod specification for Kubernetes
    """
    image_info = layer.docker_resource.get_info('wordpress-image')

    with open('reactive/spec_template.yaml') as spec_file:
        pod_spec_template = spec_file.read()

    md = metadata()
    cfg = config()

    data = {
        'name': md.get('name'),
        'docker_image_path': image_info.registry_path,
        'docker_image_username': image_info.username,
        'docker_image_password': image_info.password,
        'wordpress_db_host': wp_db_host,
        'wordpress_db_user': wp_db_user,
        'wordpress_db_password': wp_db_password,
    }
    data.update(cfg)
    return pod_spec_template % data
Example #9
def auto_encrypt():
    metadata = hookenv.metadata()
    for storage_name, storage_metadata in metadata.get('storage', {}).items():
        if storage_metadata.get('vaultlocker-encrypt', False):
            mountbase = storage_metadata.get('vaultlocker-mountbase')
            layer.vaultlocker.encrypt_storage(storage_name, mountbase)
    set_flag('layer.vaultlocker.ready')
Example #10
def check_updates():
    # The upgrade-charm hook is called for resource updates as well as
    # charm code updates, so force all previously fetched resources to
    # be fetched again (which will set the changed flag, if appropriate).
    resources = hookenv.metadata().get('resources', {})
    for name, resource in resources.items():
        if is_flag_set('layer.docker-resource.{}.fetched'.format(name)):
            layer.docker_resource.fetch(name)
Example #11
    def test_metadata(self):
        open_ = mock_open()
        open_.return_value = io.BytesIO(CHARM_METADATA)

        with patch('charmhelpers.core.hookenv.open', open_, create=True):
            with patch.dict('os.environ', {'CHARM_DIR': '/var/empty'}):
                metadata = hookenv.metadata()
        self.assertEqual(metadata, yaml.safe_load(CHARM_METADATA))
Example #12
def auto_fetch():
    resources = hookenv.metadata().get('resources', {})
    for name, resource in resources.items():
        is_docker = resource.get('type') == 'oci-image'
        is_auto_fetch = resource.get('auto-fetch', False)
        if is_docker and is_auto_fetch:
            layer.docker_resource.fetch(name)
    set_flag('layer.docker-resource.auto-fetched')
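auto_fetch only acts on resources whose metadata entry has type oci-image and opts in with auto-fetch. A hedged sketch of the kind of resources mapping hookenv.metadata().get('resources', {}) would have to return for the fetch to happen; the resource names are hypothetical and the field names are taken from the checks above:

# Illustrative resources section, expressed as the Python dict the loop
# above iterates over; only 'mysql_image' matches both conditions.
resources = {
    'mysql_image': {
        'type': 'oci-image',
        'auto-fetch': True,
        'description': 'Image used for the workload container',
    },
    'config-bundle': {
        'type': 'file',
        'filename': 'bundle.tar.gz',
    },
}

for name, resource in resources.items():
    if resource.get('type') == 'oci-image' and resource.get('auto-fetch', False):
        print('would fetch', name)   # -> would fetch mysql_image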
Example #13
def init_relays():
    md = hookenv.metadata()
    for role in ('provides', 'requires'):
        for endpoint_name, endpoint_info in md.get(role, {}).items():
            if endpoint_info and endpoint_info.get('relay'):
                create_relay_hooks(endpoint_name,
                                   endpoint_info,
                                   endpoint_info.get('relay'),
                                   role=role)
    set_state('relay.init')
Example #14
def make_pod_spec(dbcfg):
    with open('reactive/spec_template.yaml') as spec_file:
        pod_spec_template = Template(spec_file.read())

    md = metadata()
    cfg = config()
    data = {
        'name': md.get('name'),
        'image': cfg.get('gitlab_image'),
        'port': cfg.get('http_port'),
        'config': '; '.join([compose_config(cfg), dbcfg])
    }
    return pod_spec_template.substitute(data)
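Unlike the %-based examples above, this one wraps the template in string.Template, so the file must use $-style placeholders, and substitute() raises KeyError if any placeholder is missing from data. A minimal sketch with a hypothetical stand-in template:

# Sketch only: the template text is a hypothetical stand-in for
# reactive/spec_template.yaml written with $-style placeholders.
from string import Template

pod_spec_template = Template("""
containers:
  - name: $name
    image: $image
    ports:
      - containerPort: $port
    config: "$config"
""")

data = {
    'name': 'gitlab',
    'image': 'gitlab/gitlab-ce:latest',
    'port': 80,
    'config': 'external_url http://gitlab.example; db_host 10.0.0.4',
}
print(pod_spec_template.substitute(data))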
Example #15
def make_pod_spec():
    with open('reactive/spec_template.yaml') as spec_file:
        pod_spec_template = spec_file.read()

    md = metadata()

    image_info = layer.docker_resource.get_info('redis_image')

    data = {
        'name': md.get('name'),
        'docker_image_path': image_info.registry_path,
        'docker_image_username': image_info.username,
        'docker_image_password': image_info.password,
    }
    return pod_spec_template % data
Example #16
def make_pod_spec(dbcfg):
    image_info = layer.docker_resource.get_info('gitlab_image')

    with open('reactive/spec_template.yaml') as spec_file:
        pod_spec_template = spec_file.read()

    md = metadata()
    cfg = config()
    data = {
        'name': md.get('name'),
        'docker_image_path': image_info.registry_path,
        'docker_image_username': image_info.username,
        'docker_image_password': image_info.password,
        'port': cfg.get('http_port'),
        'config': '; '.join([compose_config(cfg), dbcfg])
    }
    return pod_spec_template % data
Example #17
def poll_remote():
    env = {}
    env.update(os.environ)
    env['ETCDCTL_ENDPOINT'] = hookenv.config().get('etcd')

    kv = unitdata.kv()
    local_role = kv.get('relay.local.relation.role')
    local_relname = kv.get('relay.local.relation.name')
    remote_role = kv.get('relay.remote.relation.role')
    print((local_role, local_relname, remote_role))
    if not local_role or not local_relname or not remote_role:
        hookenv.status_set('blocked', 'waiting for relation')
        return
    hookenv.status_set('active', 'ready')

    md = hookenv.metadata()
    for endpoint_name, endpoint_info in md.get(local_role, {}).items():
        if not endpoint_info or not endpoint_info.get('relay'):
            continue
        relay_name = endpoint_info.get('relay')
        relations = hookenv.role_and_interface_to_relations(
            local_role, endpoint_info['interface'])
        hookenv.log('relay=%s relations=%s' % (relay_name, relations))
        for relation_name in relations:
            etcd_path = '/%s/%s' % (relay_name, local_role)
            try:
                remote_data_json = check_output(['etcdctl', 'get', etcd_path],
                                                env=env,
                                                universal_newlines=True)
                remote_data = json.loads(remote_data_json)
            except CalledProcessError as cpe:
                if cpe.returncode == 4:
                    # Not found -- no data
                    remote_data = {}
                else:
                    hookenv.log('failed to relay %s: %s' % (etcd_path, cpe))
                    continue
            except Exception as e:
                hookenv.log('failed to relay %s: %s' % (etcd_path, e))
                continue
            if data_changed(etcd_path, remote_data):
                for rid in hookenv.relation_ids(relation_name):
                    hookenv.relation_set(relation_id=rid, **remote_data)
                    hookenv.log('relayed %s to relation %s' % (etcd_path, rid))
Example #18
def poll_remote():
    env = {}
    env.update(os.environ)
    env['ETCDCTL_ENDPOINT'] = hookenv.config().get('etcd')

    kv = unitdata.kv()
    local_role = kv.get('relay.local.relation.role')
    local_relname = kv.get('relay.local.relation.name')
    remote_role = kv.get('relay.remote.relation.role')
    print((local_role, local_relname, remote_role))
    if not local_role or not local_relname or not remote_role:
        hookenv.status_set('blocked', 'waiting for relation')
        return
    hookenv.status_set('active', 'ready')

    md = hookenv.metadata()
    for endpoint_name, endpoint_info in md.get(local_role, {}).items():
        if not endpoint_info or not endpoint_info.get('relay'):
            continue
        relay_name = endpoint_info.get('relay')
        relations = hookenv.role_and_interface_to_relations(local_role, endpoint_info['interface'])
        hookenv.log('relay=%s relations=%s' % (relay_name, relations))
        for relation_name in relations:
            etcd_path = '/%s/%s' % (relay_name, local_role)
            try:
                remote_data_json = check_output(['etcdctl', 'get', etcd_path], env=env, universal_newlines=True)
                remote_data = json.loads(remote_data_json)
            except CalledProcessError as cpe:
                if cpe.returncode == 4:
                    # Not found -- no data
                    remote_data = {}
                else:
                    hookenv.log('failed to relay %s: %s' % (etcd_path, cpe))
                    continue
            except Exception as e:
                hookenv.log('failed to relay %s: %s' % (etcd_path, e))
                continue
            if data_changed(etcd_path, remote_data):
                for rid in hookenv.relation_ids(relation_name):
                    hookenv.relation_set(relation_id=rid, **remote_data)
                    hookenv.log('relayed %s to relation %s' % (etcd_path, rid))
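poll_remote only reads from etcd; some counterpart is expected to have stored relation data as JSON under /{relay_name}/{local_role}. A hedged sketch of what that writer side might look like, reusing the same etcdctl (v2) invocation style as the reader; the endpoint, path, and payload are illustrative:

# Illustrative writer for the relay: stores relation data as JSON at the
# path poll_remote() later reads with 'etcdctl get'.
import json
import os
from subprocess import check_output

env = dict(os.environ)
env['ETCDCTL_ENDPOINT'] = 'http://10.0.0.5:2379'   # hypothetical etcd endpoint

etcd_path = '/my-relay/provides'                   # /{relay_name}/{local_role}
remote_data = {'host': '10.0.0.7', 'port': '5432'}

check_output(['etcdctl', 'set', etcd_path, json.dumps(remote_data)],
             env=env, universal_newlines=True)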
Example #19
def make_pod_spec(dbcfg):
    image_info = layer.docker_resource.get_info('mediawiki_image')

    with open('reactive/spec_template.yaml') as spec_file:
        pod_spec_template = spec_file.read()

    md = metadata()
    cfg = config()
    mysql = endpoint_from_flag('db.available')
    data = {
        'name': md.get('name'),
        'docker_image_path': image_info.registry_path,
        'docker_image_username': image_info.username,
        'docker_image_password': image_info.password,
        'http_port': cfg.get('http_port'),
        'host': mysql.host(),
        'database': mysql.database(),
        'user': mysql.user(),
        'password': mysql.password(),
    }
    return pod_spec_template % data
Example #20
def make_container_spec(dbcfg):
    with open('reactive/spec_template.yaml') as spec_file:
        pod_spec_template = Template(spec_file.read())

    md = metadata()
    cfg = config()

    data = {
        'name': md.get('name'),
        'image': cfg.get('image'),
        'port': cfg.get('live_port'),
        'camera_url': cfg.get('camera_url'),
        'target_dir': cfg.get('target_dir'),
        'db_dbname': dbcfg.database(),
        'db_host': dbcfg.host(),
        'db_user': dbcfg.user(),
        'db_pass': dbcfg.password(),
        'db_port': dbcfg.port()
    }

    log('using data:\n{}'.format(data))
    return pod_spec_template.substitute(data)
Example #21
def make_pod_spec():
    """Make pod specification for Kubernetes

    Returns:
        pod_spec: Pod specification for Kubernetes
    """
    md = metadata()
    cfg = config()
    image_info = layer.docker_resource.get_info('ubuntu-image')
    with open('reactive/spec_template.yaml') as spec_file:
        pod_spec_template = spec_file.read()

    app_name = application_name()

    data = {
        'name': md.get('name'),
        'docker_image_path': image_info.registry_path,
        'docker_image_username': image_info.username,
        'docker_image_password': image_info.password,
        'application_name': app_name,
    }
    data.update(cfg)
    return pod_spec_template % data
Example #22
def encrypt_storage(storage_name, mountbase=None):
    """
    Set up encryption for the given Juju storage entry, and optionally create
    and mount XFS filesystems on the encrypted storage entry location(s).

    Note that the storage entry **must** be defined with ``type: block``.

    If ``mountbase`` is not given, the location(s) will not be formatted or
    mounted.  When interacting with or mounting the location(s) manually, the
    name returned by :func:`decrypted_device` called on the storage entry's
    location should be used in place of the raw location.

    If the storage is defined as ``multiple``, the individual locations
    will be mounted at ``{mountbase}/{storage_name}/{num}`` where ``{num}``
    is based on the storage ID.  Otherwise, the storage will be mounted at
    ``{mountbase}/{storage_name}``.
    """
    metadata = hookenv.metadata()
    storage_metadata = metadata['storage'][storage_name]
    if storage_metadata['type'] != 'block':
        raise VaultLockerError('Cannot encrypt non-block storage: {}',
                               storage_name)
    multiple = 'multiple' in storage_metadata
    for storage_id in hookenv.storage_list():
        if not storage_id.startswith(storage_name + '/'):
            continue
        storage_location = hookenv.storage_get('location', storage_id)
        if mountbase and multiple:
            mountpoint = Path(mountbase) / storage_id
        elif mountbase:
            mountpoint = Path(mountbase) / storage_name
        else:
            mountpoint = None
        encrypt_device(storage_location, mountpoint)
        set_flag('layer.vaultlocker.{}.ready'.format(storage_id))
        set_flag('layer.vaultlocker.{}.ready'.format(storage_name))
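The docstring requires the storage entry to be declared with type: block, and auto_encrypt in Example #9 additionally looks for vaultlocker-encrypt and vaultlocker-mountbase keys on the same entry. A hedged sketch of the storage section those functions expect, written as the Python dict hookenv.metadata() would yield; the entry names are hypothetical:

# Illustrative 'storage' metadata: encrypt_storage('secrets') accepts the
# first entry (type block) and raises VaultLockerError for the second.
metadata = {
    'storage': {
        'secrets': {
            'type': 'block',
            'multiple': {'range': '0-'},        # mounted at {mountbase}/secrets/<num>
            'vaultlocker-encrypt': True,        # picked up by auto_encrypt() in Example #9
            'vaultlocker-mountbase': '/srv/secrets',
        },
        'logs': {
            'type': 'filesystem',
        },
    },
}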
Example #23
def list_supported_plugins():
    return [k for k in hookenv.metadata()['requires'].keys()
            if k != 'juju-info'] + \
        [k for k in hookenv.metadata()['provides'].keys()
            if k != 'juju-info']
Example #24
# pylint: disable=unused-argument
from charms.reactive import when, when_not
from charmhelpers.core import hookenv


if hookenv.metadata()['name'] == 'hadoop-client':
    # only report status if deployed as standalone client,
    # not if used as a base layer
    @when('hadoop.installed')
    def report_ready(hadoop):
        hookenv.status_set('active', 'ready')

    @when_not('hadoop.joined')
    def report_blocked():
        hookenv.status_set('blocked', 'waiting for relation to hadoop plugin')

    @when('hadoop.joined')
    @when_not('hadoop.installed')
    def report_waiting_for_hadoop(hadoop):
        hookenv.status_set('waiting', 'waiting for plugin to become ready')

    @when('java.connected')
    @when_not('java.ready')
    def report_waiting_for_java(hadoop):
        hookenv.status_set('waiting', 'waiting for java to become ready')


@when('hadoop.joined', 'java.ready')
def proxy_java(hadoop, java):
    hadoop.set_java_info(java.java_home(), java.java_version())
Example #25
    def get_repo_url(self, bigtop_version=None):
        """
        Construct our package repo based on the given bigtop version.

        The package repository is dependent on the bigtop version and
        OS attributes. Construct an appropriate value to use as our site
        bigtop::bigtop_repo_uri param.

        Param string Bigtop version ('1.1.0' or 'master')
        Return Bigtop repository URL
        Raise BigtopError if we have an unexpected version string.
        """
        bigtop_repo_url = None
        release_info = lsb_release()
        repo_arch = utils.cpu_arch().lower()

        dist_name = release_info['DISTRIB_ID'].lower()
        # NB: Set 16.04/xenial as defaults since that matches current bigtop
        # repos. Installation on non-LTS will work with these values.
        dist_release = "16.04"
        dist_series = "xenial"

        # Fail fast if we're not on ubuntu
        if dist_name != 'ubuntu':
            raise BigtopError(
                u"Charms currently only support Bigtop on Ubuntu.")

        if bigtop_version == '1.1.0':
            repo_url = ('http://bigtop-repos.s3.amazonaws.com/releases/'
                        '{version}/{dist}/{series}/{arch}')
            # NB: For 1.1.0, x86 must install from the trusty repo;
            # ppc64le only works from vivid.
            if repo_arch == "ppc64le":
                dist_series = "vivid"
                # 'le' and 'el' are swapped due to historical awfulness:
                #   https://lists.debian.org/debian-powerpc/2014/08/msg00042.html
                repo_arch = "ppc64el"
            else:
                dist_series = "trusty"
            # Substitute params.
            bigtop_repo_url = repo_url.format(
                version=self.bigtop_version,
                dist=dist_name,
                series=dist_series,
                arch=repo_arch
            )
        elif bigtop_version == '1.2.0':
            repo_url = ('http://bigtop-repos.s3.amazonaws.com/releases/'
                        '{version}/{dist}/{release}/{arch}')
            # Substitute params.
            bigtop_repo_url = repo_url.format(
                version=self.bigtop_version,
                dist=dist_name,
                release=dist_release,
                arch=repo_arch
            )
        elif bigtop_version == '1.2.1':
            # NB: Kafka is no longer served from official repos [1], nor are
            # there non-x86 repos available for 1.2.1. Handle these cases by
            # using the bigtop CI repository.
            # [1]: http://mail-archives.apache.org/mod_mbox/bigtop-announce/201708.mbox/thread
            if hookenv.metadata()['name'] == 'kafka' or repo_arch != "x86_64":
                bigtop_repo_url = ('https://ci.bigtop.apache.org/'
                                   'job/Bigtop-1.2.1/OS=ubuntu-16.04/'
                                   'lastSuccessfulBuild/artifact/output/apt')
            else:
                repo_url = ('http://repos.bigtop.apache.org/releases/'
                            '{version}/{dist}/{release}/{arch}')
                # Substitute params.
                bigtop_repo_url = repo_url.format(
                    version=self.bigtop_version,
                    dist=dist_name,
                    release=dist_release,
                    arch=repo_arch
                )
        elif bigtop_version == '1.3.0':
            # NB: Kafka is no longer served from official repos [1], nor are
            # there non-x86 repos available for 1.2.1. Handle these cases by
            # using the bigtop CI repository.
            # [1]: http://mail-archives.apache.org/mod_mbox/bigtop-announce/201708.mbox/thread
            #if hookenv.metadata()['name'] == 'kafka' or repo_arch != "x86_64":
            bigtop_repo_url = ('https://ci.bigtop.apache.org/job/Bigtop-1.3.0/'
                                   'DISTRO=ubuntu-16.04,PLATFORM=amd64-slave/'
                                   'lastSuccessfulBuild/artifact/output/apt/')
            # else:
            #     repo_url = ('http://repos.bigtop.apache.org/releases/'
            #                 '{version}/{dist}/{release}/{arch}')
            #     # Substitute params.
            #     bigtop_repo_url = repo_url.format(
            #         version=self.bigtop_version,
            #         dist=dist_name,
            #         release=dist_release,
            #         arch=repo_arch
            #     )
        elif bigtop_version == 'master':
            if repo_arch == "x86_64":
                bigtop_repo_url = ('https://ci.bigtop.apache.org/'
                                   'job/Bigtop-trunk-repos/'
                                   'OS=ubuntu-16.04,label=docker-slave/'
                                   'ws/output/apt')
            else:
                bigtop_repo_url = ('https://ci.bigtop.apache.org/'
                                   'job/Bigtop-trunk-repos/'
                                   'OS=ubuntu-16.04-{},label=docker-slave/'
                                   'ws/output/apt'.format(repo_arch))
        else:
            raise BigtopError(
                u"Unknown Bigtop version for repo_url: {}".format(bigtop_version))

        return bigtop_repo_url
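Tracing the branches above, these are the URLs the method ends up returning for a unit on x86_64 Ubuntu, assuming self.bigtop_version holds the same value as the bigtop_version argument:

# Derived from the branches above, not captured output:
#   get_repo_url('1.1.0')
#     -> http://bigtop-repos.s3.amazonaws.com/releases/1.1.0/ubuntu/trusty/x86_64
#   get_repo_url('1.2.0')
#     -> http://bigtop-repos.s3.amazonaws.com/releases/1.2.0/ubuntu/16.04/x86_64
#   get_repo_url('1.2.1')      # charm name not 'kafka'
#     -> http://repos.bigtop.apache.org/releases/1.2.1/ubuntu/16.04/x86_64
#   get_repo_url('master')
#     -> https://ci.bigtop.apache.org/job/Bigtop-trunk-repos/OS=ubuntu-16.04,label=docker-slave/ws/output/apt
#   get_repo_url('2.0.0')
#     -> raises BigtopError (unknown version)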
Example #26
    def get_repo_url(self, bigtop_version=None):
        """
        Construct our package repo based on the given bigtop version.

        The package repository is dependent on the bigtop version and
        OS attributes. Construct an appropriate value to use as our site
        bigtop::bigtop_repo_uri param.

        Param string Bigtop version ('1.1.0' or 'master')
        Return Bigtop repository URL
        Raise BigtopError if we have an unexpected version string.
        """
        bigtop_repo_url = None
        release_info = lsb_release()
        repo_arch = utils.cpu_arch().lower()

        dist_name = release_info['DISTRIB_ID'].lower()
        # NB: Set 16.04/xenial as defaults since that matches current bigtop
        # repos. Installation on non-LTS will work with these values.
        dist_release = "16.04"
        dist_series = "xenial"

        # Fail fast if we're not on ubuntu
        if dist_name != 'ubuntu':
            raise BigtopError(
                u"Charms currently only support Bigtop on Ubuntu.")

        if bigtop_version == '1.1.0':
            repo_url = ('http://bigtop-repos.s3.amazonaws.com/releases/'
                        '{version}/{dist}/{series}/{arch}')
            # NB: For 1.1.0, x86 must install from the trusty repo;
            # ppc64le only works from vivid.
            if repo_arch == "ppc64le":
                dist_series = "vivid"
                # 'le' and 'el' are swapped due to historical awfulness:
                #   https://lists.debian.org/debian-powerpc/2014/08/msg00042.html
                repo_arch = "ppc64el"
            else:
                dist_series = "trusty"
            # Substitute params.
            bigtop_repo_url = repo_url.format(
                version=self.bigtop_version,
                dist=dist_name,
                series=dist_series,
                arch=repo_arch
            )
        elif bigtop_version == '1.2.0':
            repo_url = ('http://bigtop-repos.s3.amazonaws.com/releases/'
                        '{version}/{dist}/{release}/{arch}')
            # Substitute params.
            bigtop_repo_url = repo_url.format(
                version=self.bigtop_version,
                dist=dist_name,
                release=dist_release,
                arch=repo_arch
            )
        elif bigtop_version == '1.2.1':
            # NB: Kafka is no longer served from official repos [1], nor are
            # there non-x86 repos available for 1.2.1. Handle these cases by
            # using the bigtop CI repository.
            # [1]: http://mail-archives.apache.org/mod_mbox/bigtop-announce/201708.mbox/thread
            if hookenv.metadata()['name'] == 'kafka' or repo_arch != "x86_64":
                bigtop_repo_url = ('https://ci.bigtop.apache.org/'
                                   'job/Bigtop-1.2.1/OS=ubuntu-16.04/'
                                   'lastSuccessfulBuild/artifact/output/apt')
            else:
                repo_url = ('http://repos.bigtop.apache.org/releases/'
                            '{version}/{dist}/{release}/{arch}')
                # Substitute params.
                bigtop_repo_url = repo_url.format(
                    version=self.bigtop_version,
                    dist=dist_name,
                    release=dist_release,
                    arch=repo_arch
                )
        elif bigtop_version == 'master':
            if repo_arch == "x86_64":
                bigtop_repo_url = ('https://ci.bigtop.apache.org/'
                                   'job/Bigtop-trunk-repos/'
                                   'OS=ubuntu-16.04,label=docker-slave/'
                                   'ws/output/apt')
            else:
                bigtop_repo_url = ('https://ci.bigtop.apache.org/'
                                   'job/Bigtop-trunk-repos/'
                                   'OS=ubuntu-16.04-{},label=docker-slave/'
                                   'ws/output/apt'.format(repo_arch))
        else:
            raise BigtopError(
                u"Unknown Bigtop version for repo_url: {}".format(bigtop_version))

        return bigtop_repo_url
Example #27
def _implicit_peer_relation_name():
    md = hookenv.metadata()
    if 'peers' in md:
        return sorted(md['peers'].keys())[0]
    return None
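The helper just picks the alphabetically first peer endpoint declared in metadata. A tiny sketch of that selection on a hypothetical peers section:

# Same selection as _implicit_peer_relation_name(), applied to a
# hypothetical metadata dict with two peer endpoints.
md = {
    'peers': {
        'replica': {'interface': 'mysql-replica'},
        'cluster': {'interface': 'mysql-ha'},
    },
}

if 'peers' in md:
    print(sorted(md['peers'].keys())[0])   # -> cluster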
Example #28
def make_pod_spec():
    """Generate the pod spec.
    """

    md = metadata()
    cfg = config()

    image_info = layer.docker_resource.get_info(ECK_OPERATOR)

    roles = get_roles_from_yaml()
    operator = get_operator_from_yaml()
    operator_namespace = model_name()
    operator_roles = cfg.get('operator-roles')
    service_account_name = application_name()
    service_name = md.get('name')

    spec = operator['spec']['template']['spec']
    container = spec['containers'][0]
    operator_ports = container['ports']
    operator_resources = container['resources']
    termination_grace_period_seconds = spec['terminationGracePeriodSeconds']

    pod_spec = {
        'version': 2,
        'containers': [
            {
                'name': service_name,
                'args': [
                    "manager",
                    "--operator-roles",
                    operator_roles,
                    "--operator-namespace",
                    operator_namespace,
                    "--enable-debug-logs=false",
                ],
                'imageDetails': {
                    'imagePath': image_info.registry_path,
                    'username': image_info.username,
                    'password': image_info.password,
                },
                'imagePullPolicy': "IfNotPresent",
                'config': {
                    'OPERATOR_NAMESPACE': operator_namespace,
                    'WEBHOOK_SECRET': "webhook-server-secret",
                    'WEBHOOK_PODS_LABEL': "elastic-operator",
                    'OPERATOR_IMAGE': image_info.registry_path,
                },
                'resources': operator_resources,
                'ports': operator_ports,
                'terminationMessagePath': "/dev/termination-log",
                'terminationMessagePolicy': "File",
            },
        ],
        'dnsPolicy': "ClusterFirst",
        'restartPolicy': "Always",
        'serviceAccountName': service_account_name,
        'serviceAccountRoles': {
            'automountServiceAccountToken': True,
            'rules': roles,
        },
        'terminationGracePeriodSeconds': termination_grace_period_seconds,
    }
    return pod_spec
Example #29
def _implicit_peer_relation_name():
    md = hookenv.metadata()
    assert 'peers' in md, 'No peer relations in metadata.yaml'
    return sorted(md['peers'].keys())[0]
Example #30
def _implicit_peer_relation_name():
    md = hookenv.metadata()
    assert 'peers' in md, 'No peer relations in metadata.yaml'
    return sorted(md['peers'].keys())[0]
Example #31
# pylint: disable=unused-argument
from charms.reactive import when, when_not
from charmhelpers.core import hookenv
from charms import layer

if hookenv.metadata()['name'] == 'hadoop-client':
    # only report Ready status if deployed as standalone client,
    # not if used as a base layer
    @when('hadoop.installed')
    def report_ready(hadoop):
        hookenv.status_set('active', 'Ready')


@when_not('hadoop.joined')
def report_blocked():
    cfg = layer.options('hadoop-client')
    if not cfg.get('silent'):
        hookenv.status_set('blocked', 'Waiting for relation to Hadoop Plugin')


@when('hadoop.joined')
@when_not('hadoop.installed')
def report_waiting(hadoop):
    cfg = layer.options('hadoop-client')
    if not cfg.get('silent'):
        hookenv.status_set('waiting', 'Waiting for Plugin to become ready')