Example #1
def ensure_sufficient_masters():
    """Redis enforces us to use at minimum 3 master nodes.
    Set leader flag indicating we have met the minimum # nodes.
    """

    if is_flag_set('endpoint.cluster.peer.joined'):
        endpoint = 'endpoint.cluster.peer.joined'
    elif is_flag_set('endpoint.cluster.peer.changed'):
        endpoint = 'endpoint.cluster.peer.changed'
    else:
        status.blocked('No peer endpoint set')
        return

    # Get the peers, check for min length
    peers = endpoint_from_flag(endpoint).all_units
    peer_ips = [peer._data['private-address']
                for peer in peers if peer._data is not None]
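    # two or more peers plus this unit satisfies the 3-master minimum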
    if len(peer_ips) > 1:
        status.active(
            "Minimum # masters available, got {}.".format(len(peer_ips)+1))
        init_masters = ",".join(peer_ips + [unit_private_ip()])
        charms.leadership.leader_set(init_masters=init_masters)

    clear_flag('endpoint.cluster.peer.joined')
    clear_flag('endpoint.cluster.peer.changed')
Example #2
def initialize_new_leader():
    """Create an initial cluster string to bring up a single member cluster of
    etcd, and set the leadership data so the followers can join this one."""
    bag = EtcdDatabag()
    bag.token = bag.token
    bag.set_cluster_state("new")
    address = get_ingress_address("cluster")
    cluster_connection_string = get_connection_string([address], bag.management_port)
    bag.set_cluster("{}={}".format(bag.unit_name, cluster_connection_string))

    render_config(bag)
    host.service_restart(bag.etcd_daemon)

    # sorry, some hosts need this. The charm races with systemd and wins.
    time.sleep(2)

    # Check health status before we say we are good
    etcdctl = EtcdCtl()
    health = etcdctl.cluster_health()
    if "unhealthy" in health:
        status.blocked("Cluster not healthy.")
        return
    # We have a healthy leader, broadcast initial data-points for followers
    open_port(bag.port)
    leader_connection_string = get_connection_string([address], bag.port)
    leader_set({"leader_address": leader_connection_string, "cluster": bag.cluster})

    # set registered state since if we ever become a follower, we will not need
    # to re-register
    set_state("etcd.registered")

    # finish bootstrap delta and set configured state
    set_state("etcd.leader.configured")
Example #3
def reregister_connector():
    status.maintenance('Reregistering connector')
    if not register_latest_connector():
        status.blocked('Could not reregister previous connectors, retrying next hook.')
    else:
        status.active('ready')
        clear_flag('kafka-connect-base.unregistered')
Example #4
def push_filebeat_index(elasticsearch):
    """Create the Filebeat index in Elasticsearch.

    Once elasticsearch is available, make 5 attempts to create a filebeat
    index. Set appropriate charm status so the operator knows when ES is
    configured to accept data.
    """
    hosts = elasticsearch.list_unit_data()
    for host in hosts:
        host_string = "{}:{}".format(host['host'], host['port'])

    max_attempts = 5
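    # the for/else "else" branch below runs only if every attempt failed
    # (i.e. the loop completed without hitting "break")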
    for i in range(1, max_attempts + 1):
        if push_beat_index(elasticsearch=host_string,
                           service='filebeat',
                           fatal=False):
            set_state('filebeat.index.pushed')
            status.active('Filebeat ready.')
            break
        else:
            msg = "Attempt {} to push filebeat index failed (retrying)".format(
                i)
            status.waiting(msg)
            time.sleep(i * 30)  # back off 30s for each attempt
    else:
        msg = "Failed to push filebeat index to http://{}".format(host_string)
        status.blocked(msg)
Example #5
def check_cluster_health():
    """report on the cluster health every 5 minutes"""
    etcdctl = EtcdCtl()
    health = etcdctl.cluster_health()

    # Determine if the unit is healthy or unhealthy
    if "unhealthy" in health["status"]:
        unit_health = "UnHealthy"
    else:
        unit_health = "Healthy"

    # Determine units peer count, and surface 0 by default
    try:
        peers = len(etcdctl.member_list())
    except Exception:
        unit_health = "Errored"
        peers = 0

    bp = "{0} with {1} known peer{2}"
    status_message = bp.format(unit_health, peers, "s" if peers != 1 else "")

    if unit_health in ["UnHealthy", "Errored"]:
        status.blocked(status_message)
    else:
        status.active(status_message)
Example #6
def install_custom_ca():
    """
    Installs a configured CA cert into the system-wide location.
    """
    ca_cert = hookenv.config().get('custom-registry-ca')
    if ca_cert:
        try:
            # decode to bytes, as that's what install_ca_cert wants
            _ca = b64decode(ca_cert)
        except Exception:
            status.blocked(
                'Invalid base64 value for custom-registry-ca config')
            return
        else:
            host.install_ca_cert(_ca, name='juju-custom-registry')
            charm = hookenv.charm_name()
            hookenv.log(
                'Custom registry CA has been installed for {}'.format(charm))

            # manage appropriate charm flags to recycle the runtime daemon
            if charm == 'docker':
                clear_flag('docker.available')
                set_flag('docker.restart')
            elif charm == 'containerd':
                set_flag('containerd.restart')
            else:
                hookenv.log('Unknown runtime: {}. '
                            'Cannot request a service restart.'.format(charm))
Example #7
def start_kc_influxdb():
    # Get all config for the InfluxDB connector.
    influxdb = endpoint_from_flag('influxdb.available')
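    # "conf" is assumed to be this layer's module-level charm config
    # (e.g. conf = hookenv.config()); it is not defined in this snippet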
    if not ensure_db_exists(conf.get('database'), influxdb.hostname(),
                            influxdb.port()):
        return
    connector_configs = {
        'connector.class':
        'com.datamountaineer.streamreactor.connect.influx.InfluxSinkConnector',
        'tasks.max':
        str(conf.get('max-tasks')),
        'connect.influx.url':
        'http://' + influxdb.hostname() + ':' + influxdb.port(),
        'connect.influx.db':
        conf.get('database'),
        'connect.influx.username':
        influxdb.user(),
        'connect.influx.password':
        influxdb.password(),
        'connect.influx.kcql':
        conf.get('kcql'),
        'topics':
        conf.get("topics").replace(" ", ","),
    }
    # Ask the base layer to send the config to the Kafka connect REST API.
    response = register_connector(connector_configs, INFLUXDB_CONNECTOR_NAME)
    if response and (response.status_code == 200
                     or response.status_code == 201):
        status.active('ready')
        clear_flag('kafka-connect-influxdb.stopped')
        set_flag('kafka-connect-influxdb.running')
    else:
        log('Could not register/update connector. Response: ' + str(response))
        status.blocked(
            'Could not register/update connector, retrying next hook.')
Example #8
def configure_sources():
    """Add user specified package sources from the service configuration.

    See charmhelpers.fetch.configure_sources for details.
    """
    config = hookenv.config()

    # We don't have enums, so we need to validate this ourselves.
    package_status = config.get('package_status') or ''
    if package_status not in ('hold', 'install'):
        status.blocked('Unknown package_status {}'.format(package_status))
        # Die before further hooks are run. This isn't very nice, but
        # there is no other way to inform the operator that they have
        # invalid configuration.
        raise SystemExit(0)

    sources = config.get('install_sources') or ''
    keys = config.get('install_keys') or ''
    if reactive.helpers.data_changed('apt.configure_sources', (sources, keys)):
        fetch.configure_sources(update=False,
                                sources_var='install_sources',
                                keys_var='install_keys')
        reactive.set_flag('apt.needs_update')

    # Clumsy 'config.get() or' per Bug #1641362
    extra_packages = sorted((config.get('extra_packages') or '').split())
    if extra_packages:
        charms.apt.queue_install(extra_packages)
Example #9
def reset_base_flags():
    data_changed('resource-context', {})
    clear_flag('kafka-connect-base.configured')
    clear_flag('kafka-connect.running')
    status.maintenance('Unregistering connector')
    if unregister_latest_connector():
        set_flag('kafka-connect-base.unregistered')
    else:
        status.blocked('Could not unregister connectors')
Example #10
def add_new_peer_nodes_to_cluster():
    """Add new peers to cluster
    """

    if is_flag_set('endpoint.cluster.peer.joined'):
        endpoint = 'endpoint.cluster.peer.joined'
    elif is_flag_set('endpoint.cluster.peer.changed'):
        endpoint = 'endpoint.cluster.peer.changed'
    else:
        status.blocked('No peer endpoint set')
        return

    # Get the known application peer IP addresses from Juju's perspective
    peers = endpoint_from_flag(endpoint).all_units
    peer_ips = [
        peer._data['private-address'] for peer in peers
        if peer._data is not None
    ]

    # Get the known cluster node IPs from Redis's point of view
    cluster_node_ips = [node['node_ip'] for node in get_cluster_nodes_info()]

    # Compare the nodes in the cluster to the peer nodes that juju is aware of
    # Register nodes that are juju peers, but not part of the cluster
    node_added = False
    for ip in peer_ips:
        if ip not in cluster_node_ips:
            node_added = True
            cmd = "{} --cluster add-node {}:6379 {}:6379".format(
                REDIS_CLI, ip, unit_private_ip())
            out = check_output(cmd, shell=True)
            log(out)

    # Give the cluster a second to recognize the new node
    sleep(1)

    if node_added:
        cluster_nodes = get_cluster_nodes_info()
        cluster_node_ips = [node['node_ip'] for node in cluster_nodes]
        cluster_node_ids = [node['node_id'] for node in cluster_nodes]

        charms.leadership.leader_set(
            cluster_node_ips=",".join(cluster_node_ips))
        charms.leadership.leader_set(
            cluster_nodes_json=json.dumps(cluster_nodes))

        # Generate the weights string for the rebalance command
        node_weights = " ".join(
            ["{}=1".format(node_id) for node_id in cluster_node_ids])
        cmd = ("{} --cluster rebalance --cluster-weight {} "
               "--cluster-timeout 3600 --cluster-use-empty-masters "
               "{}:6379").format(REDIS_CLI, node_weights, unit_private_ip())
        out = check_output(cmd, shell=True)
        log(out)

    clear_flag('endpoint.cluster.peer.joined')
    clear_flag('endpoint.cluster.peer.changed')
Example #11
def configure_registry():
    """
    Add docker registry config when present.

    :return: None
    """
    registry = endpoint_from_flag("endpoint.docker-registry.ready")
    netloc = registry.registry_netloc

    # handle tls data
    cert_subdir = netloc
    cert_dir = os.path.join(CERTIFICATE_DIRECTORY, cert_subdir)
    insecure_opt = {"insecure-registry": netloc}
    if registry.has_tls():
        # ensure the CA that signed our registry cert is trusted
        install_ca_cert(registry.tls_ca, name="juju-docker-registry")
        # remove potential insecure docker opts related to this registry
        manage_docker_opts(insecure_opt, remove=True)
        manage_registry_certs(cert_dir, remove=False)
    else:
        manage_docker_opts(insecure_opt, remove=False)
        manage_registry_certs(cert_dir, remove=True)

    # handle auth data
    if registry.has_auth_basic():
        hookenv.log("Logging into docker registry: {}.".format(netloc))
        cmd = [
            "docker",
            "login",
            netloc,
            "-u",
            registry.basic_user,
            "-p",
            registry.basic_password,
        ]
        try:
            check_output(cmd, stderr=subprocess.STDOUT)
        except CalledProcessError as e:
            if b"http response" in e.output.lower():
                # non-tls login with basic auth will error like this:
                #  Error response ... server gave HTTP response to HTTPS client
                msg = "docker login requires a TLS-enabled registry"
            elif b"unauthorized" in e.output.lower():
                # invalid creds will error like this:
                #  Error response ... 401 Unauthorized
                msg = "Incorrect credentials for docker registry"
            else:
                msg = "docker login failed, see juju debug-log"
            status.blocked(msg)
    else:
        hookenv.log("Disabling auth for docker registry: {}.".format(netloc))
        # NB: it's safe to logout of a registry that was never logged in
        check_call(["docker", "logout", netloc])

    # NB: store our netloc so we can clean up if the registry goes away
    DB.set("registry_netloc", netloc)
    set_state("docker.registry.configured")
Example #12
def get_credentials():
    """
    Get the credentials from either the config or the hook tool.

    Prefers the config so that it can be overridden.
    """
    no_creds_msg = "missing credentials; set credentials config"
    config = hookenv.config()
    # try to use Juju's trust feature
    try:
        result = subprocess.run(
            ["credential-get"],
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        creds = yaml.safe_load(result.stdout.decode("utf8"))
        access_key = creds["credential"]["attributes"]["access-key"]
        secret_key = creds["credential"]["attributes"]["secret-key"]
        update_credentials_file(access_key, secret_key)
        return True
    except FileNotFoundError:
        pass  # juju trust not available
    except subprocess.CalledProcessError as e:
        if "permission denied" not in e.stderr.decode("utf8"):
            raise
        no_creds_msg = "missing credentials access; grant with: juju trust"

    # try credentials config
    if config["credentials"]:
        try:
            creds_data = b64decode(config["credentials"]).decode("utf8")
            creds = ConfigParser()
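            # the decoded blob may be a bare key/value credentials file with no
            # section header; fall back to an implicit [default] section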
            try:
                creds.read_string(creds_data)
            except MissingSectionHeaderError:
                creds.read_string("[default]\n" + creds_data)
            for section in creds.sections():
                access_key = creds[section].get("aws_access_key_id")
                secret_key = creds[section].get("aws_secret_access_key")
                if access_key and secret_key:
                    update_credentials_file(access_key, secret_key)
                    return True
        except Exception:
            status.blocked("invalid value for credentials config")
            return False

    # try access-key and secret-key config
    access_key = config["access-key"]
    secret_key = config["secret-key"]
    if access_key and secret_key:
        update_credentials_file(access_key, secret_key)
        return True

    # no creds provided
    status.blocked(no_creds_msg)
    return False
Example #13
def install_ssl_termination():
    nginxcfg = NginxConfig()
    http_path = os.path.join(nginxcfg.http_available_path, 'http')
    ssl_term_path = os.path.join(nginxcfg.http_available_path,
                                 'ssl-termination')
    os.makedirs(http_path, exist_ok=True)
    os.makedirs(ssl_term_path, exist_ok=True)
    set_flag('ssl-termination.installed')
    if not is_flag_set('endpoint.ssl-termination.joined'):
        status.blocked('Waiting for fqdn subordinates or http relation')
Example #14
def set_redis_version():
    """Set redis version
    """
    version = get_redis_version()
    if version:
        application_version_set(version)
        set_flag('redis.version.set')
    else:
        status.blocked("Cannot get redis-server version")
        return
Example #15
def upgrade_v3_complete():
    status.maintenance('Completing Calico 3 upgrade')
    try:
        calico_upgrade.configure()
        calico_upgrade.complete()
        calico_upgrade.cleanup()
    except Exception:
        log(traceback.format_exc())
        message = 'Calico upgrade failed, see debug log'
        status.blocked(message)
        return
    leader_set({'calico-v3-completion-needed': None})
Example #16
def get_credentials():
    """
    Get the credentials from either the config or the hook tool.

    Prefers the config so that it can be overridden.
    """
    config = hookenv.config()

    # try credentials config
    if config['credentials']:
        try:
            creds_data = b64decode(config['credentials']).decode('utf8')
            creds_data = json.loads(creds_data)
            log('Using "credentials" config values for credentials')
            _save_creds(creds_data)
            return True
        except Exception:
            status.blocked('invalid value for credentials config')
            return False
    no_creds_msg = 'missing credentials; set credentials config'

    # try individual config
    # NB: if a user sets one of these, they better set 'em all!
    if any([
            config['vsphere_ip'], config['user'], config['password'],
            config['datacenter']
    ]):
        log('Using individual config values for credentials')
        _save_creds(config)
        return True

    # try to use Juju's trust feature
    try:
        result = subprocess.run(['credential-get'],
                                check=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        creds_data = yaml.safe_load(result.stdout.decode('utf8'))
        # need to append the datastore, as it always comes from config
        creds_data.update({'datastore': '{}'.format(config['datastore'])})
        log('Using credentials-get for credentials')
        _save_creds(creds_data)
        return True
    except FileNotFoundError:
        pass  # juju trust not available
    except subprocess.CalledProcessError as e:
        if 'permission denied' not in e.stderr.decode('utf8'):
            raise
        no_creds_msg = 'missing credentials access; grant with: juju trust'

    # no creds provided
    status.blocked(no_creds_msg)
    return False
Example #17
def get_credentials():
    """
    Get the credentials from either the config or the hook tool.

    Prefers the config so that it can be overridden.
    """
    config = hookenv.config()

    # try credentials config
    if config['credentials']:
        try:
            creds_data = b64decode(config['credentials']).decode('utf8')
            creds_data = json.loads(creds_data)
            log('Using "credentials" config values for credentials')
            _save_creds(creds_data)
            return True
        except Exception:
            status.blocked('invalid value for credentials config')
            return False
    no_creds_msg = 'missing credentials; set credentials config'

    # try individual config
    if any([config['auth-url'],
            config['username'],
            config['password'],
            config['project-name'],
            config['user-domain-name'],
            config['project-domain-name']]):
        log('Using individual config values for credentials')
        _save_creds(config)
        return True

    # try to use Juju's trust feature
    try:
        result = subprocess.run(['credential-get'],
                                check=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        creds_data = yaml.safe_load(result.stdout.decode('utf8'))

        log('Using credentials-get for credentials')
        _save_creds(creds_data)
        return True
    except FileNotFoundError:
        pass  # juju trust not available
    except subprocess.CalledProcessError as e:
        if 'permission denied' not in e.stderr.decode('utf8'):
            raise
        no_creds_msg = 'missing credentials access; grant with: juju trust'

    # no creds provided
    status.blocked(no_creds_msg)
    return False
Example #18
def configure_calico_pool():
    ''' Configure Calico IP pool. '''
    config = charm_config()
    if not config['manage-pools']:
        log('Skipping pool configuration')
        set_state('calico.pool.configured')
        return

    status.maintenance('Configuring Calico IP pool')

    try:
        # remove unrecognized pools, and default pool if CIDR doesn't match
        pools = calicoctl_get('pool')['items']

        cidrs = tuple(cidr.strip() for cidr in config['cidr'].split(','))
        names = tuple('ipv{}'.format(get_network(cidr).version)
                      for cidr in cidrs)
        pool_names_to_delete = [
            pool['metadata']['name'] for pool in pools
            if pool['metadata']['name'] not in names
            or pool['spec']['cidr'] not in cidrs
        ]

        for pool_name in pool_names_to_delete:
            log('Deleting pool: %s' % pool_name)
            calicoctl('delete', 'pool', pool_name, '--skip-not-exists')

        for cidr, name in zip(cidrs, names):
            # configure the default pool
            pool = {
                'apiVersion': 'projectcalico.org/v3',
                'kind': 'IPPool',
                'metadata': {
                    'name': name,
                },
                'spec': {
                    'cidr': cidr,
                    'ipipMode': config['ipip'],
                    'vxlanMode': config['vxlan'],
                    'natOutgoing': config['nat-outgoing'],
                }
            }

            calicoctl_apply(pool)
    except CalledProcessError:
        log(traceback.format_exc())
        if config['ipip'] != 'Never' and config['vxlan'] != 'Never':
            status.blocked('ipip and vxlan configs are in conflict')
        else:
            status.waiting('Waiting to retry calico pool configuration')
        return

    set_state('calico.pool.configured')
Example #19
def upgrade_v3_migrate_data():
    status.maintenance('Migrating data to Calico 3')
    try:
        calico_upgrade.configure()
        calico_upgrade.dry_run()
        calico_upgrade.start()
    except Exception:
        log(traceback.format_exc())
        message = 'Calico upgrade failed, see debug log'
        status.blocked(message)
        return
    leader_set({'calico-v3-data-migration-needed': None})
Example #20
def create_topics():
    if not os.path.exists('/usr/lib/kafka/bin'):
        status.blocked('Could not find Kafka library, make sure the Kafka Connect charm is colocated with a Kafka unit.')
        return
    kafka = endpoint_from_flag('kafka.ready')
    zookeepers = []
    for zookeeper in kafka.zookeepers():
        zookeepers.append(zookeeper['host'] + ":" + zookeeper['port'])
    if not zookeepers:
        return
    # Set replication factor as number of Kafka brokers
    # Use the zookeeper-shell because Juju sets the Kafka
    # broker info one hook at a time and therefore we do not
    # know beforehand how many brokers there are
    p = Popen(['/usr/lib/kafka/bin/zookeeper-shell.sh', zookeepers[0]],
              stdin=PIPE,
              stdout=PIPE)
    output = p.communicate(b'ls /brokers/ids')[0]
    # The broker info is in the last line between [id1,id2,id3]
    replication_factor = output.decode('utf-8') \
                               .strip() \
                               .split('\n')[-1] \
                               .count(',') \
                               + 1
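    # Kafka Connect in distributed mode needs three internal topics for
    # connector config, offsets and status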
    topics_suffixes = ['connectconfigs', 'connectoffsets', 'connectstatus']
    partition_counts = [1, 50, 10]  # Best effort partition numbers
    model = os.environ['JUJU_MODEL_NAME']
    app = os.environ['JUJU_UNIT_NAME'].split('/')[0]
    prefix = "{}.{}.".format(model, app)
    for (suffix, partitions) in zip(topics_suffixes, partition_counts):
        topic = prefix + suffix
        unitdata.kv().set(suffix, topic)
        if topic_exists(topic, zookeepers):
            continue
        try:
            output = run(['/usr/lib/kafka/bin/kafka-topics.sh',
                          '--zookeeper',
                          ",".join(zookeepers),
                          "--create",
                          "--topic",
                          topic,
                          "--partitions",
                          str(partitions),
                          "--replication-factor",
                          str(replication_factor),
                          "--config",
                          "cleanup.policy=compact"], stdout=PIPE)
            output.check_returncode()
        except CalledProcessError as e:
            log(e)
    set_flag('kafka-connect-base.topic-created')
Example #21
def create_nginx_config(filename, fqdn, upstreams, cert, credentials,
                        htaccess_name, subdir, nginx_config):
    # fqdn has to be a list
    credentials = credentials.split()
    # Did we get a valid value? If not, blocked!
    if len(credentials) not in (0, 2):
        status.blocked('authentication config wrong! '
                       'Expected 2 space-separated strings, got {}.'.format(
                           len(credentials)))
        return
    # We got a valid value; remove any existing htpasswd file so it can be regenerated.
    if os.path.exists('/etc/nginx/.' + htaccess_name):
        os.remove('/etc/nginx/.' + htaccess_name)

    nginx_context = {
        'privkey': cert['privkey'],
        'fullchain': cert['fullchain'],
        'fqdn': " ".join(fqdn),
        'upstreams': upstreams,
        'upstream_name': filename,
        'dhparam': cert['dhparam'],
        'auth_basic': bool(credentials),
    }

    if nginx_config:
        nginx_context['nginx_config'] = nginx_config

    # Did we get credentials? If so, configure them.
    if len(credentials) == 2:
        check_call([
            'htpasswd', '-c', '-b', '/etc/nginx/.' + htaccess_name,
            credentials[0], credentials[1]
        ])
        nginx_context['htpasswd'] = '/etc/nginx/.' + htaccess_name

    cfg = templating.render(source="encrypt.nginx.jinja2",
                            target=None,
                            context=nginx_context)
    nginxcfg = NginxConfig()
    try:
        nginxcfg.write_config(NginxModule.HTTP, cfg, filename, subdir=subdir)
        nginxcfg.enable_all_config(NginxModule.HTTP, subdir=subdir) \
                .validate_nginx() \
                .reload_nginx()
        return True
    except NginxConfigError as e:
        log(e)
        status.blocked('{}'.format(e))
        return False
Example #22
def get_vsphere_config():
    config = hookenv.config()
    vsphere_config = {
        'datastore': config['datastore'],
        'folder': config['folder'],
        'respool_path': config['respool_path'],
    }
    # datastore and folder can't be empty
    if not vsphere_config['datastore']:
        status.blocked("Missing required 'datastore' config")
        return False
    if not vsphere_config['folder']:
        status.blocked("Missing required 'folder' config")
        return False
    return vsphere_config
Example #23
def ensure_db_exists(database, influxdb_host, influxdb_port):
    # Create the db if possible.
    data = {
        'q': 'CREATE DATABASE {}'.format(database),
    }
    url = "http://{}:{}/query".format(influxdb_host, influxdb_port)
    try:
        resp = requests.post(url, data=data)
        resp.raise_for_status()
        if resp.status_code == 200:
            return True
    except requests.exceptions.RequestException as e:
        log(e)
        status.blocked('Error creating Influxdb database.')
    return False
Example #24
def install_etcd():
    """Attempt resource get on the "etcd" and "etcdctl" resources. If no
    resources are provided attempt to install from the archive only on the
    16.04 (xenial) series."""

    if is_state("etcd.installed"):
        msg = "Manual upgrade required. run-action snap-upgrade."
        status.blocked(msg)
        return

    status.maintenance("Installing etcd.")

    channel = get_target_etcd_channel()
    if channel:
        snap.install("etcd", channel=channel, classic=False)
Example #25
def update_status():
    if not is_flag_set('vault.connected'):
        status.blocked('missing relation to vault')
        return
    if not is_flag_set('layer.vaultlocker.configured'):
        status.waiting('waiting for vaultlocker config')
        return
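    # check which of the charm's storage instances vaultlocker has marked ready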
    ready, missing = [], []
    for storage in ('secrets', 'secrets/0', 'multi-secrets', 'multi-secrets/0',
                    'multi-secrets/1', 'multi-secrets/2'):
        if is_flag_set('layer.vaultlocker.{}.ready'.format(storage)):
            ready.append(storage)
        else:
            missing.append(storage)
    status.active('ready: {}; missing: {}'.format(','.join(ready),
                                                  ','.join(missing)))
Example #26
def get_credentials():
    """
    Get the credentials from either the config or the hook tool.

    Prefers the config so that it can be overridden.
    """
    msg = "missing credentials; set credentials config"
    config = hookenv.config()
    credentials = {}
    # try to use Juju's trust feature
    try:
        result = subprocess.run(
            ["credential-get"],
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        creds = yaml.safe_load(result.stdout.decode("utf8"))
        creds_data = creds["credential"]["attributes"]
        login_cli(creds_data)
        credentials = creds_data
    except FileNotFoundError:
        pass  # juju trust not available
    except subprocess.CalledProcessError as e:
        if "permission denied" not in e.stderr.decode("utf8"):
            raise
        msg = "missing credentials access; grant with: juju trust"

    # try credentials config
    if config["credentials"]:
        try:
            creds_data = b64decode(config["credentials"]).decode("utf8")
            loaded_creds = json.loads(creds_data)
            login_cli(loaded_creds)
            credentials = loaded_creds
        except Exception as ex:
            msg = "invalid value for credentials config"
            log_debug("{}: {}", msg, ex)
            credentials = {}

    if credentials == {}:
        status.blocked(msg)
        return credentials

    credentials.setdefault("managed-identity", True)

    return credentials
Example #27
def ready():
    preconditions = [
        'calico.service.installed', 'calico.pool.configured',
        'calico.cni.configured', 'calico.bgp.globals.configured',
        'calico.node.configured', 'calico.bgp.peers.configured'
    ]
    if is_state('upgrade.series.in-progress'):
        status.blocked('Series upgrade in progress')
        return
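    # every precondition state must be set before the unit can report active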
    for precondition in preconditions:
        if not is_state(precondition):
            return
    if is_leader() and not is_state('calico.npc.deployed'):
        status.waiting('Waiting to retry deploying policy controller')
        return
    if not service_running('calico-node'):
        status.waiting('Waiting for service: calico-node')
        return
    status.active('Calico is active')
Example #28
def get_credentials():
    """
    Get the credentials from either the config or the hook tool.

    Prefers the config so that it can be overridden.
    """
    no_creds_msg = 'missing credentials; set credentials config'
    config = hookenv.config()
    # try to use Juju's trust feature
    try:
        result = subprocess.run(['credential-get'],
                                check=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        creds = yaml.safe_load(result.stdout.decode('utf8'))
        creds_data = creds['credential']['attributes']
        login_cli(creds_data)
        return True
    except FileNotFoundError:
        pass  # juju trust not available
    except subprocess.CalledProcessError as e:
        if 'permission denied' not in e.stderr.decode('utf8'):
            raise
        no_creds_msg = 'missing credentials access; grant with: juju trust'

    # try credentials config
    if config['credentials']:
        try:
            creds_data = b64decode(config['credentials']).decode('utf8')
            login_cli(json.loads(creds_data))
            return True
        except Exception as ex:
            msg = 'invalid value for credentials config'
            log_debug('{}: {}', msg, ex)
            status.blocked(msg)
            return False

    # no creds provided
    status.blocked(no_creds_msg)
    return False
Example #29
def configure_keepalived_service():
    """Set up the keepalived service"""

    virtual_ip = config().get("virtual_ip")
    if virtual_ip == "":
        status.blocked("Please configure virtual ips")
        return

    network_interface = config().get("network_interface")
    if network_interface == "":
        network_interface = default_route_interface()

    context = {
        "is_leader": is_leader(),
        "virtual_ip": virtual_ip,
        "network_interface": network_interface,
        "router_id": config().get("router_id"),
        "service_port": config().get("port"),
        "healthcheck_interval": config().get("healthcheck_interval"),
    }
    render(
        source="keepalived.conf",
        target=KEEPALIVED_CONFIG_FILE,
        context=context,
        perms=0o644,
    )
    service_restart("keepalived")

    # net.ipv4.ip_nonlocal_bind lets services bind to the virtual IP even when
    # this unit does not currently hold it
    render(
        source="50-keepalived.conf",
        target=SYSCTL_FILE,
        context={"sysctl": {"net.ipv4.ip_nonlocal_bind": 1}},
        perms=0o644,
    )
    service_restart("procps")

    status.active("VIP ready")
    set_flag("keepalived.started")
Example #30
def install_calico_binaries():
    ''' Unpack the Calico binaries. '''
    # on amd64, the resource is called 'calico-cni'; other arches have a suffix
    architecture = arch()
    if architecture == 'amd64':
        resource_name = 'calico-cni'
    else:
        resource_name = 'calico-cni-{}'.format(architecture)

    try:
        archive = resource_get(resource_name)
    except Exception:
        message = 'Error fetching the calico resource.'
        log(message)
        status.blocked(message)
        return

    if not archive:
        message = 'Missing calico resource.'
        log(message)
        status.blocked(message)
        return

    filesize = os.stat(archive).st_size
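    # archives smaller than ~1 MB are assumed to be placeholder or truncated
    # resources rather than real binaries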
    if filesize < 1000000:
        message = 'Incomplete calico resource'
        log(message)
        status.blocked(message)
        return

    status.maintenance('Unpacking calico resource.')

    charm_dir = os.getenv('CHARM_DIR')
    unpack_path = os.path.join(charm_dir, 'files', 'calico')
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfz', archive, '-C', unpack_path]
    log(cmd)
    check_call(cmd)

    apps = [
        {
            'name': 'calico',
            'path': '/opt/cni/bin'
        },
        {
            'name': 'calico-ipam',
            'path': '/opt/cni/bin'
        },
    ]

    for app in apps:
        unpacked = os.path.join(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        check_call(install)

    set_state('calico.binaries.installed')