def ensure_beats_are_running():
    status_set('maintenance', f'ensuring beats are fully started')
    for beat in ['filebeat', 'metricbeat']:
        if start_restart_systemd_service(beat):
            status_set('active', f'{beat} has initially started')

            ctr = 0
            beat_record = 0

            while True:
                if ctr == 100:
                    status_set('blocked',
                               f'{beat} not starting - please debug')
                    return
                if beat_record == 10:
                    status_set('active', f'{beat} started')
                    set_flag(f'elasticsearch.{beat}.available')
                    break

                status_set('maintenance', f'ensuring {beat} has fully started')

                if service_running(beat):
                    beat_record += 1
                else:
                    start_restart_systemd_service(beat)
                    beat_record = 0

                ctr += 1
                sleep(1)

    if is_flag_set('elasticsearch.filebeat.available') and \
            is_flag_set('elasticsearch.metricbeat.available'):
        set_flag('elasticsearch.beats.available')
    es_active_status()
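
A minimal sketch of what the start_restart_systemd_service() helper used above might look like, assuming the charmhelpers service helpers; the charm's real implementation may differ.

from charmhelpers.core.host import service_restart, service_running, service_start


def start_restart_systemd_service(service_name):
    """Start the service, or restart it if it is already running.

    Returns True if the service is running afterwards.
    """
    if service_running(service_name):
        return service_restart(service_name)
    return service_start(service_name)
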
Example #2
def ensure_sufficient_masters():
    """Redis enforces us to use at minimum 3 master nodes.
    Set leader flag indicating we have met the minimum # nodes.
    """

    if is_flag_set('endpoint.cluster.peer.joined'):
        endpoint = 'endpoint.cluster.peer.joined'
    elif is_flag_set('endpoint.cluster.peer.changed'):
        endpoint = 'endpoint.cluster.peer.changed'
    else:
        status.blocked('No peer endpoint set')
        return

    # Get the peers, check for min length
    peers = endpoint_from_flag(endpoint).all_units
    peer_ips = [peer._data['private-address']
                for peer in peers if peer._data is not None]
    if len(peer_ips) > 1:
        status.active(
            "Minimum # masters available, got {}.".format(len(peer_ips)+1))
        init_masters = \
            ",".join(peer_ips + [unit_private_ip()])
        charms.leadership.leader_set(init_masters=init_masters)

    clear_flag('endpoint.cluster.peer.joined')
    clear_flag('endpoint.cluster.peer.changed')
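
In a reactive charm a handler like the one above is normally gated by flag decorators. A hypothetical wiring, assuming charms.reactive; the handler name and the 'redis.cluster.created' guard flag are illustrative only.

from charms.reactive import when, when_any, when_not


@when_any('endpoint.cluster.peer.joined', 'endpoint.cluster.peer.changed')
@when('leadership.is_leader')
@when_not('redis.cluster.created')  # illustrative guard flag
def check_sufficient_masters():
    ensure_sufficient_masters()
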
def configure_resources(*args):
    """Create/discover resources for management of load balancer instances."""
    if not reactive.is_flag_set('leadership.is_leader'):
        return ch_core.hookenv.action_fail('action must be run on the leader '
                                           'unit.')
    if not reactive.all_flags_set(
            'identity-service.available', 'neutron-api.available',
            'sdn-subordinate.available', 'amqp.available'):
        return ch_core.hookenv.action_fail('all required relations not '
                                           'available, please defer action '
                                           'until deployment is complete.')
    identity_service = reactive.endpoint_from_flag(
        'identity-service.available')
    try:
        (network, secgrp) = api_crud.get_mgmt_network(
            identity_service,
            create=reactive.is_flag_set('config.default.create-mgmt-network'),
        )
    except api_crud.APIUnavailable as e:
        ch_core.hookenv.action_fail(
            'Neutron API not available yet, deferring '
            'network creation/discovery. ("{}")'.format(e))
        return
    if network and secgrp:
        leadership.leader_set({
            'amp-boot-network-list': network['id'],
            'amp-secgroup-list': secgrp['id']
        })
    if reactive.is_flag_set('config.default.custom-amp-flavor-id'):
        # NOTE(fnordahl): custom flavor provided through configuration is
        # handled in the charm class configuration property.
        try:
            flavor = api_crud.get_nova_flavor(identity_service)
        except api_crud.APIUnavailable as e:
            ch_core.hookenv.action_fail('Nova API not available yet, '
                                        'deferring flavor '
                                        'creation. ("{}")'.format(e))
            return
        else:
            leadership.leader_set({'amp-flavor-id': flavor.id})

    amp_key_name = ch_core.hookenv.config('amp-ssh-key-name')
    if amp_key_name:
        identity_service = reactive.endpoint_from_flag(
            'identity-service.available')
        api_crud.create_nova_keypair(identity_service, amp_key_name)

    # Set quotas to unlimited
    try:
        api_crud.set_service_quotas_unlimited(identity_service)
    except api_crud.APIUnavailable as e:
        ch_core.hookenv.action_fail(
            'Unable to set quotas to unlimited: {}'.format(e))

    # Execute port setup for the leader; the followers will execute theirs
    # in the `leader-settings-changed` hook.
    with charm.provide_charm_instance() as octavia_charm:
        api_crud.setup_hm_port(identity_service, octavia_charm)
        octavia_charm.render_all_configs()
        octavia_charm._assess_status()
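
Functions such as configure_resources() are exposed to operators as Juju actions. A hedged sketch of a generic action entry-point script that could dispatch to it; the dispatch table, script layout, and use of hookenv.action_name() are assumptions, not this charm's actual wiring.

#!/usr/bin/env python3
import sys

from charmhelpers.core import hookenv

# Map action names (as defined in actions.yaml) to the handlers above.
ACTIONS = {'configure-resources': configure_resources}


def main(args):
    action_name = hookenv.action_name()
    try:
        action = ACTIONS[action_name]
    except KeyError:
        return 'Action "{}" is undefined'.format(action_name)
    action(args)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
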
Example #4
def config_changed():
    layer.status.maint('Reconfiguring the registry.')
    charm_config = hookenv.config()
    name = charm_config.get('registry-name')

    # If a provider gave us certs and http-host changed, make sure SANs are accurate
    if (is_flag_set('cert-provider.certs.available')
            and charm_config.changed('http-host')):
        request_certificates()

    # If our name changed, make sure we stop the old one
    if (charm_config.changed('registry-name')
            and charm_config.previous('registry-name')):
        name = charm_config.previous('registry-name')

    layer.docker_registry.stop_registry(name=name)
    layer.docker_registry.configure_registry()
    layer.docker_registry.start_registry()

    # Now that we reconfigured the registry, inform connected clients if
    # anything changed that they should know about.
    if (is_flag_set('charm.docker-registry.client-configured') and any(
        (charm_config.changed('auth-basic-password'),
         charm_config.changed('auth-basic-user'),
         charm_config.changed('http-host')))):
        configure_client()

    report_status()
def report_status():
    '''Update status based on related charms/config.'''
    app_suffix = []
    charm_config = hookenv.config()
    name = charm_config.get('registry-name')
    netloc = layer.docker_registry.get_netloc()

    if layer.docker_registry.is_container(name, all=False):
        if is_flag_set('charm.docker-registry.tls-enabled'):
            app_suffix.append('https')
        else:
            app_suffix.append('http')
        if is_flag_set('website.available'):
            app_suffix.append('proxied')

        if app_suffix:
            status_suffix = ' ({})'.format(', '.join(app_suffix))
        else:
            status_suffix = ''

        if is_flag_set('leadership.is_leader'):
            layer.status.active('Listening on {}{}.'.format(
                netloc, status_suffix))
        else:
            layer.status.active('Backup listening on {}{}.'.format(
                netloc, status_suffix))
    else:
        layer.status.blocked('{} container is stopped.'.format(name))
Example #6
def acquire_db_config():
    """Acquire juju provided database config
    """

    if is_flag_set('pgsql.master.available'):
        pgsql = endpoint_from_flag('pgsql.master.available')

        if pgsql is None:
            log('PostgreSQL not found', level='ERROR')
            return

        db = pgsql.master

        kv.set('db-scheme', 'pgsql')
        kv.set('db-user', db.user)
        kv.set('db-password', db.password)
        kv.set('db-host', db.host)
        kv.set('db-base', db.dbname)

    elif is_flag_set('mysql.available'):
        mysql = endpoint_from_flag('mysql.available')
        prefix = "fresh-rss"

        if mysql is None:
            log('MySQL not found', level='ERROR')
            return

        kv.set('db-scheme', 'mysql')
        kv.set('db-user', mysql.username(prefix))
        kv.set('db-password', mysql.password(prefix))
        kv.set('db-host', mysql.hostname(prefix))
        kv.set('db-base', mysql.database(prefix))

    status.active('Fresh-RSS Database Acquired')
    set_flag('fresh-rss.db.config.acquired')
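
The kv store used above is presumably the charmhelpers unit-level key/value database; a minimal sketch of the module-level setup this handler relies on.

from charmhelpers.core import unitdata

kv = unitdata.kv()

# Values written here survive across hook invocations, so a later install
# handler can read them back, e.g. kv.get('db-host').
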
Example #7
def install(snapname, **kw):
    '''Install a snap.

    Snap will be installed from the corresponding resource if available,
    otherwise from the Snap Store.

    Sets the snap.installed.{snapname} flag.

    If the snap.installed.{snapname} flag is already set then the refresh()
    function is called.
    '''
    installed_flag = get_installed_flag(snapname)
    local_flag = get_local_flag(snapname)
    if reactive.is_flag_set(installed_flag):
        refresh(snapname, **kw)
    else:
        if hookenv.has_juju_version('2.0'):
            res_path = _resource_get(snapname)
            if res_path is False:
                _install_store(snapname, **kw)
            else:
                _install_local(res_path, **kw)
                reactive.set_flag(local_flag)
        else:
            _install_store(snapname, **kw)
        reactive.set_flag(installed_flag)

    # Installing any snap will first ensure that 'core' is installed. Set an
    # appropriate flag for consumers that want to get/set core options.
    core_installed = get_installed_flag('core')
    if not reactive.is_flag_set(core_installed):
        reactive.set_flag(core_installed)
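
Illustrative use of the layer's install() helper; the snap name and keyword option below are examples only and pass straight through to the store install via **kw.

# Install (or refresh) a snap from the store.
install('vault', channel='1.8/stable')

# Other handlers can then react to the flag install() sets:
# @when('snap.installed.vault')
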
Example #8
def not_configured():
    if not is_flag_set('config.set.ga-view-id'):
        blocked('ga-view-id must be set')
    elif not is_flag_set('config.set.ga-creds'):
        blocked('ga-creds must be set')
    else:
        blocked('Waiting for push-gateway relation')
Example #9
def test_create_token_sign_auth_requests(get_token, setup_tokens):
    set_flag("kubernetes-master.auth-webhook-tokens.setup")
    kube_control = endpoint_from_flag.return_value
    get_token.return_value = None
    clear_flag("kubernetes-master.auth-webhook-tokens.setup")
    assert not kubernetes_master.create_tokens_and_sign_auth_requests()
    assert kube_control.sign_auth_request.call_count == 0
    assert not is_flag_set("kubernetes-master.auth-webhook-tokens.setup")

    endpoint_from_flag.return_value = None
    get_token.return_value = True
    clear_flag("kubernetes-master.auth-webhook-tokens.setup")
    assert kubernetes_master.create_tokens_and_sign_auth_requests()
    assert kube_control.sign_auth_request.call_count == 0
    assert is_flag_set("kubernetes-master.auth-webhook-tokens.setup")

    endpoint_from_flag.return_value = kube_control
    kube_control.auth_user.return_value = [
        (None, {"user": "******", "group": "foo"}),
        (None, {"user": None, "group": None}),
    ]
    clear_flag("kubernetes-master.auth-webhook-tokens.setup")
    assert kubernetes_master.create_tokens_and_sign_auth_requests()
    assert kube_control.sign_auth_request.call_count == 1
    assert is_flag_set("kubernetes-master.auth-webhook-tokens.setup")

    kube_control.auth_user.return_value = [
        (None, {"user": "******", "group": "foo"}),
        (None, {"user": "******", "group": "bar"}),
    ]
    clear_flag("kubernetes-master.auth-webhook-tokens.setup")
    assert kubernetes_master.create_tokens_and_sign_auth_requests()
    assert kube_control.sign_auth_request.call_count == 3
    assert is_flag_set("kubernetes-master.auth-webhook-tokens.setup")
Example #10
def rebalance_and_remove():
    """Rebalance and remove.
    Rebalance the node slots before removal.
    """
    if is_flag_set('redis.cluster.joined') and \
       not is_flag_set('redis.cluster.stopped'):
        nodes_info_json = charms.leadership.leader_get("cluster_nodes_json")
        nodes_info = json.loads(nodes_info_json)
        for node in nodes_info:
            if node['node_ip'] == unit_private_ip():
                # Rebalance slots away from node to remove
                cmd = ("{} --cluster rebalance {}:6379 "
                       "--cluster-weight {}=0").format(
                           REDIS_CLI, node['node_ip'], node['node_id'])
                out = check_output(cmd, shell=True)
                log(out)
                # TODO: Need to figure out a way to poll here.
                sleep(5)
                try:
                    # Remove node from cluster
                    cmd = "{} --cluster del-node {}:6379 {}".format(
                        REDIS_CLI, node['node_ip'], node['node_id'])
                    out = check_output(cmd, shell=True)
                    log(out)
                except CalledProcessError as e:
                    log(e)
        set_flag('redis.cluster.stopped')
Example #11
    def create_cluster(self):
        """Create the MySQL InnoDB cluster.

        Creates the MySQL InnoDB cluster using self.cluster_name.

        :param self: Self
        :type self: MySQLInnoDBClusterCharm instance
        :side effect: Executes MySQL Shell script to create the MySQL InnoDB
                      Cluster
        :returns: This function is called for its side effect
        :rtype: None
        """
        if reactive.is_flag_set("leadership.set.cluster-created"):
            ch_core.hookenv.log(
                "Cluster: {}, already created".format(
                    self.options.cluster_name), "WARNING")
            return

        if not reactive.is_flag_set(
                "leadership.set.cluster-instance-configured-{}".format(
                    self.cluster_address)):
            ch_core.hookenv.log(
                "This instance is not yet configured for "
                "clustering, delaying cluster creation.", "WARNING")
            return

        _script_template = """
        shell.connect("{}:{}@{}")
        var cluster = dba.createCluster("{}");
        """
        ch_core.hookenv.log(
            "Creating cluster: {}.".format(self.options.cluster_name), "INFO")
        with tempfile.NamedTemporaryFile(mode="w", suffix=".js") as _script:
            _script.write(
                _script_template.format(
                    self.cluster_user, self.cluster_password,
                    self.cluster_address, self.options.cluster_name,
                    self.cluster_user, self.cluster_address,
                    self.cluster_password))
            _script.flush()

            cmd = ([self.mysqlsh_bin, "--no-wizard", "-f", _script.name])
            try:
                output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                ch_core.hookenv.log(
                    "Failed creating cluster: {}".format(
                        e.output.decode("UTF-8")), "ERROR")
                return
        ch_core.hookenv.log("Cluster Created: {}".format(
            output.decode("UTF-8")),
                            level="DEBUG")
        leadership.leader_set({
            "cluster-instance-clustered-{}".format(self.cluster_address):
            True
        })
        leadership.leader_set({"cluster-created": str(uuid.uuid4())})
Example #12
def test_service_cidr_expansion():
    configure_apiserver('10.152.183.0/24', '10.152.0.0/16')
    assert is_flag_set('kubernetes-master.had-service-cidr-expanded')
    clear_flag('kubernetes-master.had-service-cidr-expanded')
    configure_apiserver('10.152.183.0/24,fe80::/120', '10.152.183.0/24,fe80::/112')
    assert is_flag_set('kubernetes-master.had-service-cidr-expanded')
    unitdata.kv().get.return_value = '10.152.0.0/16'
    update_for_service_cidr_expansion()
    assert kubectl.call_count == 4
def add_new_peer_nodes_to_cluster():
    """Add new peers to cluster
    """

    if is_flag_set('endpoint.cluster.peer.joined'):
        endpoint = 'endpoint.cluster.peer.joined'
    elif is_flag_set('endpoint.cluster.peer.changed'):
        endpoint = 'endpoint.cluster.peer.changed'
    else:
        status.blocked('No peer endpoint set')
        return

    # Get the known application peer ip addresses from juju's perspective
    peers = endpoint_from_flag(endpoint).all_units
    peer_ips = [
        peer._data['private-address'] for peer in peers
        if peer._data is not None
    ]

    # Get the known cluster node ips from redis point of view
    cluster_node_ips = [node['node_ip'] for node in get_cluster_nodes_info()]

    # Compare the nodes in the cluster to the peer nodes that juju is aware of
    # Register nodes that are juju peers, but not part of the cluster
    node_added = False
    for ip in peer_ips:
        if ip not in cluster_node_ips:
            node_added = True
            cmd = "{} --cluster add-node {}:6379 {}:6379".format(
                REDIS_CLI, ip, unit_private_ip())
            out = check_output(cmd, shell=True)
            log(out)

    # Give the cluster a second to recognize the new node
    sleep(1)

    if node_added:
        cluster_nodes = get_cluster_nodes_info()
        cluster_node_ips = [node['node_ip'] for node in cluster_nodes]
        cluster_node_ids = [node['node_id'] for node in cluster_nodes]

        charms.leadership.leader_set(
            cluster_node_ips=",".join(cluster_node_ips))
        charms.leadership.leader_set(
            cluster_nodes_json=json.dumps(cluster_nodes))

        # Generate the weights string for the rebalance command
        node_weights = " ".join(
            ["{}=1".format(node_id) for node_id in cluster_node_ids])
        cmd = ("{} --cluster rebalance --cluster-weight {} "
               "--cluster-timeout 3600 --cluster-use-empty-masters "
               "{}:6379").format(REDIS_CLI, node_weights, unit_private_ip())
        out = check_output(cmd, shell=True)
        log(out)

    clear_flag('endpoint.cluster.peer.joined')
    clear_flag('endpoint.cluster.peer.changed')
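
get_cluster_nodes_info() is not included in these examples; a plausible sketch, assuming it parses the output of `redis-cli cluster nodes`, whose lines begin with `<node_id> <ip>:<port>@<cluster-port>`.

from subprocess import check_output


def get_cluster_nodes_info():
    """Return [{'node_id': ..., 'node_ip': ...}, ...] for the cluster."""
    out = check_output("{} cluster nodes".format(REDIS_CLI), shell=True)
    nodes = []
    for line in out.decode().splitlines():
        fields = line.split()
        if len(fields) < 2:
            continue
        nodes.append({
            'node_id': fields[0],
            'node_ip': fields[1].split(':')[0],
        })
    return nodes
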
def test_service_cidr_greenfield_deploy():
    cni = configure_apiserver(None, "10.152.183.0/24")
    cni.set_service_cidr.assert_called_once_with("10.152.183.0/24")
    assert not is_flag_set(
        "kubernetes-control-plane.had-service-cidr-expanded")
    cni = configure_apiserver(None, "10.152.183.0/24,fe80::/120")
    cni.set_service_cidr.assert_called_once_with("10.152.183.0/24,fe80::/120")
    assert not is_flag_set(
        "kubernetes-control-plane.had-service-cidr-expanded")
def scale_in():
    """ Handle scale in.

    Only react to cluster.departed, not any other departed hook nor a
    cluster.broken hook. Cluster.departed is only executed once on any given
    node. We want to shutdown and clean up only in a graceful departing
    scenario. The remove-instance action will function for all other scenarios.

    If this is the node departing, stop services and notify peers. If this is
    the leader node and not the departing node, attempt to remove the instance
    from cluster metadata.
    """
    # Intentionally using the charm helper rather than the interface to
    # guarantee we get only the departing instance's cluster-address
    _departing_address = ch_core.hookenv.relation_get("cluster-address")
    _departing_unit = ch_core.hookenv.departing_unit()
    if not _departing_unit:
        ch_core.hookenv.log(
            "In a cluster departing hook but departing unit is unset. "
            "Doing nothing.", "WARNING")
        return

    with charm.provide_charm_instance() as instance:
        if ch_core.hookenv.local_unit() == _departing_unit:
            # If this is the departing unit stop mysql and attempt a clean
            # departure.
            ch_core.hookenv.log(
                "{} is this unit departing. Shutting down.".format(
                    _departing_unit), "WARNING")
            reactive.set_flag("local.cluster.unit.departing")
            instance.depart_instance()
            if reactive.is_flag_set('leadership.is_leader'):
                ch_core.hookenv.log(
                    "Since this departing instance is the juju leader node it "
                    "is not possible to automatically remove it from cluster "
                    "metadata. Run the remove-instance action on the newly "
                    "elected leader with address={} to remove it from cluster "
                    "metadata and clear flags.".format(
                        instance.cluster_address), "WARNING")
        elif reactive.is_flag_set('leadership.is_leader'):
            # Attempt to clean up departing unit.
            # If the departing unit's IP remains in cluster metadata as seen in
            # the cluster-status action, run the remove-instance action with
            # the "MISSING" instance's IP.
            if _departing_address:
                ch_core.hookenv.log(
                    "Automatically removing departing instance {} from "
                    "cluster metadata.".format(_departing_address), "WARNING")
                instance.remove_instance(json.loads(_departing_address),
                                         force=True)
            else:
                ch_core.hookenv.log(
                    "Leader is unable to cleanly remove departing instance "
                    "{_du}. No cluster-address provided. Run remove-instance "
                    "address={_du} to clear cluster metadata and flags.".
                    format(_du=_departing_unit), "WARNING")
Example #16
def test_service_cidr_expansion():
    configure_apiserver("10.152.183.0/24", "10.152.0.0/16")
    assert is_flag_set("kubernetes-master.had-service-cidr-expanded")
    clear_flag("kubernetes-master.had-service-cidr-expanded")
    configure_apiserver("10.152.183.0/24,fe80::/120", "10.152.183.0/24,fe80::/112")
    assert is_flag_set("kubernetes-master.had-service-cidr-expanded")
    db = unitdata.kv()
    db.set("kubernetes-master.service-cidr", "10.152.0.0/16")
    update_for_service_cidr_expansion()
    assert kubectl.call_count == 4
def test_service_cidr_non_expansion():
    cni = configure_apiserver("10.152.183.0/24", "10.154.183.0/24")
    cni.set_service_cidr.assert_called_once_with("10.152.183.0/24")
    assert not is_flag_set(
        "kubernetes-control-plane.had-service-cidr-expanded")
    cni = configure_apiserver("10.152.183.0/24,fe80::/120",
                              "10.152.183.0/24,fe81::/120")
    cni.set_service_cidr.assert_called_once_with("10.152.183.0/24,fe80::/120")
    assert not is_flag_set(
        "kubernetes-control-plane.had-service-cidr-expanded")
Example #18
    def _validate_deploy_interfaces(self, interfaces):
        valid_interfaces = VALID_DEPLOY_INTERFACES
        has_secret = reactive.is_flag_set("leadership.set.temp_url_secret")
        for interface in interfaces:
            if interface not in valid_interfaces:
                raise ValueError('Deploy interface %s is not valid. Valid '
                                 'interfaces are: %s' %
                                 (interface, ", ".join(valid_interfaces)))
        if reactive.is_flag_set("config.complete"):
            if "direct" in interfaces and has_secret is False:
                raise ValueError('run set-temp-url-secret action on leader to '
                                 'enable direct deploy method')
def change_config(conf):
    port = conf['port']
    driver_port = conf['driver_port']
    cluster_port = conf['cluster_port']
    unit = local_unit().replace('/', '_')
    old_port = conf.previous('port')
    old_driver_port = conf.previous('driver_port')
    old_cluster_port = conf.previous('cluster_port')
    if conf['admin_console']:
        admin_console = ''
    else:
        admin_console = 'no-http-admin'
    clustering = ''
    if not is_flag_set('leadership.is_leader'):
        clustering = 'join=' + leader_get('leader_ip') + ':' + str(
            cluster_port)
    conf_parameters = [
        str(port),
        str(driver_port),
        str(cluster_port), unit, admin_console, clustering
    ]
    conf_state = [
        conf.changed('port'),
        conf.changed('driver_port'),
        conf.changed('cluster_port'),
        conf.changed('admin_console')
    ]
    if True in conf_state:
        render_conf_file(conf_parameters)
        for former_port in [old_port, old_driver_port, old_cluster_port]:
            if former_port is not None:
                close_port(former_port)
        if conf['admin_console']:
            open_port(port)
        open_port(driver_port)
        open_port(cluster_port)
    if conf.changed('admin_password') and not kv.get('initial_state'):
        new_password = conf['admin_password']
        if is_flag_set('leadership.is_leader'):
            old_password = leader_get('password')
            import rethinkdb as r
            conn = r.connect(host="localhost",
                             port=driver_port,
                             db='rethinkdb',
                             password=old_password).repl()
            r.table('users').get('admin').update({
                'password': new_password
            }).run(conn)
            conn.close()
            leader_set({'password': new_password})
        kv.set('password', new_password)
    kv.set('initial_state', False)
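
render_conf_file() is not shown in these examples; a hedged sketch, assuming it renders a charm-provided Jinja2 template with the charmhelpers templating helper. The template name, target path, and context keys are illustrative.

from charmhelpers.core.templating import render


def render_conf_file(conf_parameters):
    port, driver_port, cluster_port, unit, admin_console, clustering = \
        conf_parameters
    render(
        source='rethinkdb.conf.j2',  # hypothetical template name
        target='/etc/rethinkdb/instances.d/default.conf',
        context={
            'port': port,
            'driver_port': driver_port,
            'cluster_port': cluster_port,
            'server_name': unit,
            'admin_console': admin_console,
            'clustering': clustering,
        },
    )
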
Example #20
def request_db():
    """Request the database from postgresql or mysql
    """
    db_name = "fresh-rss"
    db_prefix = "fresh-rss"
    db_user = "******"

    if is_flag_set('pgsql.connected'):
        pgsql = endpoint_from_flag('pgsql.connected')
        pgsql.set_database(db_name)
    elif is_flag_set('mysql.connected'):
        mysql = endpoint_from_flag('mysql.connected')
        mysql.configure(db_name, db_user, prefix=db_prefix)
    set_flag('fresh-rss.db.requested')
Example #21
def configure_ovs():
    ovsdb = reactive.endpoint_from_flag('ovsdb.available')
    with charm.provide_charm_instance() as charm_instance:
        if reactive.is_flag_set('config.changed.enable-dpdk'):
            # Install required packages and/or run update-alternatives
            charm_instance.install()
        charm_instance.render_with_interfaces(
            charm.optional_interfaces((ovsdb, ), 'nova-compute.connected',
                                      'amqp.connected'))
        charm_instance.configure_ovs(
            ','.join(ovsdb.db_sb_connection_strs),
            reactive.is_flag_set('config.changed.disable-mlockall'))
        reactive.set_flag('config.rendered')
        charm_instance.assess_status()
Example #22
def _remove_service():
    if is_flag_set('charm.aws-iam.deployed-service'):
        hookenv.log("Cleaning up service...")
        if is_flag_set('endpoint.aws-iam.available'):
            try:
                _kubectl('delete', '-f', secret_yaml)
            except CalledProcessError as e:
                hookenv.log(e)
                hookenv.log(
                    'Failed to delete AWS_IAM service. Will attempt again next update.'
                )  # noqa
                return

        clear_flag('charm.aws-iam.deployed-service')
Example #23
def set_temp_url_secret(*args):
    """Set Temp-Url-Key on storage account"""
    if not reactive.is_flag_set('leadership.is_leader'):
        return ch_core.hookenv.action_fail('action must be run on the leader '
                                           'unit.')
    if not reactive.is_flag_set('config.complete'):
        return ch_core.hookenv.action_fail('required relations are not yet '
                                           'available, please defer action '
                                           'until deployment is complete.')
    identity_service = reactive.endpoint_from_flag(
        'identity-credentials.available')
    try:
        keystone_session = api_utils.create_keystone_session(identity_service)
    except Exception as e:
        ch_core.hookenv.action_fail(
            'Failed to create keystone session ("{}")'.format(e))

    os_cli = api_utils.OSClients(keystone_session)
    if os_cli.has_swift() is False:
        ch_core.hookenv.action_fail(
            'Swift not yet available. Please wait for deployment to finish')

    if os_cli.has_glance() is False:
        ch_core.hookenv.action_fail(
            'Glance not yet available. Please wait for deployment to finish')

    if "swift" not in os_cli.glance_stores:
        ch_core.hookenv.action_fail(
            'Glance does not support Swift storage backend. '
            'Please add relation between glance and ceph-radosgw/swift')

    current_secret = leadership.leader_get("temp_url_secret")
    current_swift_secret = os_cli.get_object_account_properties().get(
        'temp-url-key', None)

    if not current_secret or current_swift_secret != current_secret:
        secret = hashlib.sha1(str(uuid.uuid4()).encode()).hexdigest()
        os_cli.set_object_account_property("temp-url-key", secret)
        leadership.leader_set({"temp_url_secret": secret})
        # render configs on leader, and assess status. Every other unit
        # will render theirs when leader-settings-changed executes.
        shared_db = reactive.endpoint_from_flag('shared-db.available')
        ironic_api = reactive.endpoint_from_flag('ironic-api.available')
        amqp = reactive.endpoint_from_flag('amqp.available')

        with charm.provide_charm_instance() as ironic_charm:
            ironic_charm.render_with_interfaces(
                charm.optional_interfaces(
                    (identity_service, shared_db, ironic_api, amqp)))
            ironic_charm._assess_status()
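
For context, the temp-url-key set above is what allows time-limited download URLs to be generated for objects in Swift (the direct deploy path relies on this). A hedged illustration using python-swiftclient; the account path and object name are placeholders, not values from this charm.

from swiftclient.utils import generate_temp_url

temp_url = generate_temp_url(
    path='/v1/AUTH_<project-id>/glance/<image-id>',
    seconds=3600,
    key=leadership.leader_get('temp_url_secret'),
    method='GET',
)
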
Example #24
def _remove_certificate():
    if is_flag_set('charm.aws-iam.certificate-written'):
        hookenv.log("Cleaning up secret...")
        if is_flag_set('endpoint.aws-iam.available'):
            try:
                _kubectl('delete', '-f', secret_yaml)
            except CalledProcessError as e:
                hookenv.log(e)
                hookenv.log(
                    'Failed to delete AWS_IAM secret. Will attempt again next update.'
                )  # noqa
                return

        clear_flag('charm.aws-iam.certificate-written')
    clear_flag('charm.aws-iam.certificate-requested')
Example #25
    def custom_assess_status_check(self):
        """Provide mirrored pool statistics through juju status."""
        if (reactive.is_flag_set('config.rendered')
                and reactive.is_flag_set('ceph-local.available')
                and reactive.is_flag_set('ceph-remote.available')):
            endpoint = reactive.endpoint_from_flag('ceph-local.available')
            for pool, attrs in endpoint.pools.items():
                if 'rbd' in attrs['applications']:
                    status = self.mirror_pool_status(pool)
                    ch_core.hookenv.log(
                        'DEBUG: mirror_pool_status({}) = "{}"'.format(
                            pool, status),
                        level=ch_core.hookenv.INFO)
            return 'active', 'Custom'
        return None, None
Example #26
def install():
    # Do nothing if we don't have kernel support yet
    if not kernel_supported():
        return

    opts = sorted_snap_opts()
    # supported-architectures is EXPERIMENTAL and undocumented.
    # It probably should live in the base layer, blocking the charm
    # during bootstrap if the arch is unsupported.
    arch = uname().machine
    for snapname, snap_opts in opts.items():
        supported_archs = snap_opts.pop("supported-architectures", None)
        if supported_archs and arch not in supported_archs:
            # Note that this does *not* error. The charm will need to
            # cope with the snaps it requested never getting installed,
            # likely by doing its own check on supported-architectures.
            hookenv.log(
                "Snap {} not supported on {!r} architecture".format(
                    snapname, arch),
                ERROR,
            )
            continue
        installed_flag = "snap.installed.{}".format(snapname)
        if not reactive.is_flag_set(installed_flag):
            snap.install(snapname, **snap_opts)
    if data_changed("snap.install.opts", opts):
        snap.connect_all()
Example #27
def configure_instances_for_clustering(cluster):
    """Configure cluster peers for clustering.

    Prepare peers to be added to the cluster.

    :param cluster: Cluster interface
    :type cluster: MySQLInnoDBClusterPeers object
    """
    ch_core.hookenv.log("Configuring instances for clustering.", "DEBUG")
    with charm.provide_charm_instance() as instance:
        for unit in cluster.all_joined_units:
            if unit.received['unit-configure-ready']:
                instance.configure_instance(
                    unit.received['cluster-address'])
                instance.add_instance_to_cluster(
                    unit.received['cluster-address'])
        # Verify all are configured
        for unit in cluster.all_joined_units:
            if not reactive.is_flag_set(
                    "leadership.set.cluster-instance-configured-{}"
                    .format(unit.received['cluster-address'])):
                return
        # All have been configured
        leadership.leader_set(
            {"cluster-instances-configured": True})
        instance.assess_status()
Example #28
def install_fresh_rss():
    """Install FreshRSS
    """

    apply_permissions()
    status.active('Installing FreshRSS')

    install_opts = []
    install_opts.extend(['--default_user', config['default-admin-username']])
    install_opts.extend(['--base_url', config['fqdn']])
    install_opts.extend(['--environment', config['environment']])

    # db specific
    install_opts.extend(['--db-type', kv.get('db-scheme')])
    install_opts.extend(['--db-base', kv.get('db-base')])
    install_opts.extend(['--db-user', kv.get('db-user')])
    install_opts.extend(['--db-password', kv.get('db-password')])
    install_opts.extend(['--db-host', kv.get('db-host')])
    install_opts.extend(['--db-prefix', config['db-prefix']])

    # ensure the needed directories in ./data/
    run_script('prepare')
    run_script('do-install', install_opts)

    if not is_flag_set('leadership.set.default_admin_init'):
        run_script('create-user', [
            '--user', config['default-admin-username'], '--password',
            config['default-admin-password']
        ])
        leader_set(default_admin_init="true")

    apply_permissions()

    status.active('FreshRSS installed')
    set_flag('fresh-rss.installed')
Example #29
    def enable_openstack(self):
        """Whether charm forms part of an OpenStack deployment.

        :returns: Whether charm forms part of an OpenStack deployment
        :rtype: boolean
        """
        return reactive.is_flag_set('charm.ovn-chassis.enable-openstack')
def setup_hm_port():
    """Create a per unit Neutron and OVS port for Octavia Health Manager.

    This is used to plug the unit into the overlay network for direct
    communication with the octavia managed load balancer instances running
    within the deployed cloud.
    """
    neutron_ovs = reactive.endpoint_from_flag('neutron-openvswitch.connected')
    ovsdb = reactive.endpoint_from_flag('ovsdb-subordinate.available')
    host_id = neutron_ovs.host() if neutron_ovs else ovsdb.chassis_name
    with charm.provide_charm_instance() as octavia_charm:
        identity_service = reactive.endpoint_from_flag(
            'identity-service.available')
        try:
            if api_crud.setup_hm_port(
                    identity_service,
                    octavia_charm,
                    host_id=host_id):
                # trigger config render to make systemd-networkd bring up
                # automatic IP configuration of the new port right now.
                reactive.set_flag('config.changed')
                if reactive.is_flag_set('charm.octavia.action_setup_hm_port'):
                    reactive.clear_flag('charm.octavia.action_setup_hm_port')
        except api_crud.APIUnavailable as e:
            ch_core.hookenv.log('Neutron API not available yet, deferring '
                                'port discovery. ("{}")'
                                .format(e),
                                level=ch_core.hookenv.DEBUG)
            return
Example #31
def get_bootstrapped_ips():
    ips = set()
    if reactive.is_flag_set('cassandra.bootstrapped'):
        ips.add(cassandra.listen_ip_address())
    u = reactive.endpoint_from_name('cluster')
    if u is not None:
        ips.update(u.get_bootstrapped_ips())
    return ips
Example #32
def install_packages():
    pin_dse()

    apt.queue_install(cassandra.get_deb_packages())

    if reactive.is_flag_set('apt.queued_installs'):
        with helpers.autostart_disabled():
            if not apt.install_queued():
                return  # apt layer already left us in a blocked state

    if cassandra.get_edition() == 'apache-snap':
        snap.install('cassandra')
    elif cassandra.get_jre() == 'oracle':
        tb = fetch_oracle_jre()
        if tb is None:
            return
        install_oracle_jre_tarball(tb)
    elif cassandra.get_jre() == 'openjdk':
        subprocess.check_call(['update-java-alternatives', '--jre-headless', '--set', 'java-1.8.0-openjdk-amd64'])
    reactive.set_flag('cassandra.installed')
Example #33
def maybe_restart():
    for k in RESTART_REQUIRED_KEYS:
        if reactive.is_flag_set('config.changed.{}'.format(k)):
            hookenv.log('{} changed, restart required'.format(k))
            reactive.set_flag('cassandra.needs_restart')
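
A hedged sketch of a companion handler that could consume the cassandra.needs_restart flag set above; the flag-driven restart pattern and the service name are assumptions, and the real charm's restart path is likely more involved.

from charmhelpers.core import hookenv, host
from charms import reactive


@reactive.when('cassandra.needs_restart')
def restart_for_config_change():
    hookenv.log('Restarting cassandra to apply configuration changes')
    if host.service_restart('cassandra'):
        reactive.clear_flag('cassandra.needs_restart')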