Example #1
def install_fresh_rss():
    """Install FreshRSS
    """

    apply_permissions()
    status.active('Installing FreshRSS')

    install_opts = []
    install_opts.extend(['--default_user', config['default-admin-username']])
    install_opts.extend(['--base_url', config['fqdn']])
    install_opts.extend(['--environment', config['environment']])

    # db specific
    install_opts.extend(['--db-type', kv.get('db-scheme')])
    install_opts.extend(['--db-base', kv.get('db-base')])
    install_opts.extend(['--db-user', kv.get('db-user')])
    install_opts.extend(['--db-password', kv.get('db-password')])
    install_opts.extend(['--db-host', kv.get('db-host')])
    install_opts.extend(['--db-prefix', config['db-prefix']])

    # ensure the needed directories exist in ./data/
    run_script('prepare')
    run_script('do-install', install_opts)

    if not is_flag_set('leadership.set.default_admin_init'):
        run_script('create-user', [
            '--user', config['default-admin-username'], '--password',
            config['default-admin-password']
        ])
        leader_set(default_admin_init="true")

    apply_permissions()

    status.active('FreshRSS installed')
    set_flag('fresh-rss.installed')
def db_monitor_respond():
    """Response to db-monitor relation changed."""
    ch_core.hookenv.log("db-monitor connected", ch_core.hookenv.DEBUG)
    db_monitor = reactive.endpoint_from_flag("db-monitor.connected")

    # the related application name is also used as the username
    username = related_app = ch_core.hookenv.remote_service_name()

    # get or create db-monitor user password
    db_monitor_stored_passwd_key = "db-monitor.{}.passwd".format(related_app)
    password = leadership.leader_get(db_monitor_stored_passwd_key)
    if not password:
        password = ch_core.host.pwgen()
        leadership.leader_set({db_monitor_stored_passwd_key: password})

    # provide relation data
    with charm.provide_charm_instance() as instance:
        # NOTE (rgildein): Create a custom user with administrator privileges,
        # but read-only access.
        if not instance.create_cluster_user(db_monitor.relation_ip, username,
                                            password, True):
            ch_core.hookenv.log("db-monitor user was not created.",
                                ch_core.hookenv.WARNING)
            return

        db_monitor.provide_access(
            port=instance.cluster_port,
            user=username,
            password=password,
        )

        instance.assess_status()
Example #3
def configure_resources(*args):
    """Create/discover resources for management of load balancer instances."""
    if not reactive.is_flag_set('leadership.is_leader'):
        return ch_core.hookenv.action_fail('action must be run on the leader '
                                           'unit.')
    if not reactive.all_flags_set(
            'identity-service.available', 'neutron-api.available',
            'sdn-subordinate.available', 'amqp.available'):
        return ch_core.hookenv.action_fail('all required relations not '
                                           'available, please defer action '
                                           'until deployment is complete.')
    identity_service = reactive.endpoint_from_flag(
        'identity-service.available')
    try:
        (network, secgrp) = api_crud.get_mgmt_network(
            identity_service,
            create=reactive.is_flag_set('config.default.create-mgmt-network'),
        )
    except api_crud.APIUnavailable as e:
        ch_core.hookenv.action_fail(
            'Neutron API not available yet, deferring '
            'network creation/discovery. ("{}")'.format(e))
        return
    if network and secgrp:
        leadership.leader_set({
            'amp-boot-network-list': network['id'],
            'amp-secgroup-list': secgrp['id']
        })
    if reactive.is_flag_set('config.default.custom-amp-flavor-id'):
        # NOTE(fnordahl): custom flavor provided through configuration is
        # handled in the charm class configuration property.
        try:
            flavor = api_crud.get_nova_flavor(identity_service)
        except api_crud.APIUnavailable as e:
            ch_core.hookenv.action_fail('Nova API not available yet, '
                                        'deferring flavor '
                                        'creation. ("{}")'.format(e))
            return
        else:
            leadership.leader_set({'amp-flavor-id': flavor.id})

    amp_key_name = ch_core.hookenv.config('amp-ssh-key-name')
    if amp_key_name:
        identity_service = reactive.endpoint_from_flag(
            'identity-service.available')
        api_crud.create_nova_keypair(identity_service, amp_key_name)

    # Set quotas to unlimited
    try:
        api_crud.set_service_quotas_unlimited(identity_service)
    except api_crud.APIUnavailable as e:
        ch_core.hookenv.action_fail(
            'Unable to set quotas to unlimited: {}'.format(e))

    # execute port setup for leader, the followers will execute theirs on
    # `leader-settings-changed` hook
    with charm.provide_charm_instance() as octavia_charm:
        api_crud.setup_hm_port(identity_service, octavia_charm)
        octavia_charm.render_all_configs()
        octavia_charm._assess_status()
Example #4
def config_leader():
    leader_set(hostname=hookenv.unit_private_ip())
    leader_set(public_ip=hookenv.unit_public_ip())
    leader_set(username='******')
    leader_set(password=hookenv.config('carte_password'))
    leader_set(port=hookenv.config('carte_port'))
    render_master_config()
def process_snapd_timer():
    """
    Set the snapd refresh timer on the leader so all cluster members
    (present and future) will refresh near the same time.

    :return: None
    """
    # Get the current snapd refresh timer; we know layer-snap has set this
    # when the 'snap.refresh.set' flag is present.
    timer = snap.get(snapname="core",
                     key="refresh.timer").decode("utf-8").strip()
    if not timer:
        # The core snap timer is empty. This likely means a subordinate timer
        # reset ours. Try to set it back to a previously leader-set value,
        # falling back to config if needed. Luckily, this should only happen
        # during subordinate install, so this should remain stable afterward.
        timer = leader_get("snapd_refresh") or hookenv.config("snapd_refresh")
        snap.set_refresh_timer(timer)

        # Ensure we have the timer known by snapd (it may differ from config).
        timer = snap.get(snapname="core",
                         key="refresh.timer").decode("utf-8").strip()

    # The first time through, data_changed will be true. Subsequent calls
    # should only update leader data if something changed.
    if data_changed("snapd_refresh", timer):
        hookenv.log("setting leader snapd_refresh timer to: {}".format(timer))
        leader_set({"snapd_refresh": timer})
Example #6
def auth_update():
    # We used to have individual superuser credentials for each node,
    # which was unnecessarily clever.
    username = '******'.format(re.subn(r'\W', '_', hookenv.local_unit())[0])
    username, password = cassandra.get_cqlshrc_credentials(username)
    leadership.leader_set(username=username, password=password)
    hookenv.log('Migrated charm superuser credentials')
Example #7
def obtain_munge_key(*args):
    # get flags
    munge_key = hookenv.config().get('munge_key')
    # Generate a munge key if it has not been provided via charm config
    if not munge_key:
        munge_key = host.pwgen(length=4096)
    leadership.leader_set(munge_key=munge_key)
Example #8
def set_active_controller():
    '''Elect an active controller unit. This is done only once, until an
    operator decides to relocate the active controller to a different node
    via an action or by running
    juju run --unit <leader-unit> "leader-set active_controller=''"
    '''
    leadership.leader_set(active_controller=hookenv.local_unit())
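
A hedged sketch of how any unit might check whether it holds the role elected
above; the helper name is an assumption, only leader_get() and local_unit()
come from the leadership layer and charmhelpers.

from charmhelpers.core import hookenv
from charms import leadership


def is_active_controller():
    # Hypothetical helper: compare the unit recorded in leader data with the
    # name of the unit running this code.
    return leadership.leader_get('active_controller') == hookenv.local_unit()
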
def upgrade():
    """An upgrade has been triggered."""
    pki_directory = os.path.join(easyrsa_directory, "pki")
    if os.path.isdir(pki_directory):
        # specific handling if the upgrade is from a previous version
        # where certificate_authority_serial is not set at install
        serial_file = "serial"
        with chdir(pki_directory):
            # if the ca and ca_key are set and serial is not
            # set this to serial in the pki directory
            if (os.path.isfile(serial_file)
                    and leader_get("certificate_authority")
                    and leader_get("certificate_authority_key")
                    and not leader_get("certificate_authority_serial")):
                with open(serial_file, "r") as stream:
                    ca_serial = stream.read()
                # set the previously unset certificate authority serial
                leader_set({"certificate_authority_serial": ca_serial})

        charm_pki_directory = os.path.join(charm_directory, "pki")
        # When the charm pki directory exists, it is stale, remove it.
        if os.path.isdir(charm_pki_directory):
            shutil.rmtree(charm_pki_directory)
        # Copy the EasyRSA/pki to the charm pki directory.
        shutil.copytree(pki_directory, charm_pki_directory, symlinks=True)
    clear_flag("easyrsa.installed")
    clear_flag("easyrsa.configured")
Example #10
def configure_instances_for_clustering(cluster):
    """Configure cluster peers for clustering.

    Prepare peers to be added to the cluster.

    :param cluster: Cluster interface
    :type cluster: MySQLInnoDBClusterPeers object
    """
    ch_core.hookenv.log("Configuring instances for clustering.", "DEBUG")
    with charm.provide_charm_instance() as instance:
        for unit in cluster.all_joined_units:
            if unit.received['unit-configure-ready']:
                instance.configure_instance(
                    unit.received['cluster-address'])
                instance.add_instance_to_cluster(
                    unit.received['cluster-address'])
        # Verify all are configured
        for unit in cluster.all_joined_units:
            if not reactive.is_flag_set(
                    "leadership.set.cluster-instance-configured-{}"
                    .format(unit.received['cluster-address'])):
                return
        # All have been configured
        leadership.leader_set(
            {"cluster-instances-configured": True})
        instance.assess_status()
Example #11
def set_sentry_system_key_to_leader():
    conf = config()
    system_secret_key = conf.get('system-secret-key')
    if not system_secret_key:
        system_secret_key = gen_random_string()
    leader_set(system_secret_key=system_secret_key)
Example #12
    def create_cluster(self):
        """Create the MySQL InnoDB cluster.

        Creates the MySQL InnoDB cluster using self.cluster_name.

        :param self: Self
        :type self: MySQLInnoDBClusterCharm instance
        :side effect: Executes MySQL Shell script to create the MySQL InnoDB
                      Cluster
        :returns: This function is called for its side effect
        :rtype: None
        """
        if reactive.is_flag_set("leadership.set.cluster-created"):
            ch_core.hookenv.log(
                "Cluster: {}, already created".format(
                    self.options.cluster_name), "WARNING")
            return

        if not reactive.is_flag_set(
                "leadership.set.cluster-instance-configured-{}".format(
                    self.cluster_address)):
            ch_core.hookenv.log(
                "This instance is not yet configured for "
                "clustering, delaying cluster creation.", "WARNING")
            return

        _script_template = """
        shell.connect("{}:{}@{}")
        var cluster = dba.createCluster("{}");
        """
        ch_core.hookenv.log(
            "Creating cluster: {}.".format(self.options.cluster_name), "INFO")
        with tempfile.NamedTemporaryFile(mode="w", suffix=".js") as _script:
            _script.write(
                _script_template.format(
                    self.cluster_user, self.cluster_password,
                    self.cluster_address, self.options.cluster_name))
            _script.flush()

            cmd = ([self.mysqlsh_bin, "--no-wizard", "-f", _script.name])
            try:
                output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                ch_core.hookenv.log(
                    "Failed creating cluster: {}".format(
                        e.output.decode("UTF-8")), "ERROR")
                return
        ch_core.hookenv.log("Cluster Created: {}".format(
            output.decode("UTF-8")),
                            level="DEBUG")
        leadership.leader_set({
            "cluster-instance-clustered-{}".format(self.cluster_address):
            True
        })
        leadership.leader_set({"cluster-created": str(uuid.uuid4())})
Example #13
def _maybe_generate_passwords():
    """
    If the leader hasn't generated passwords yet, generate them.

    """
    if not leader_get('passwords'):
        admin_pass = subprocess.check_output(['pwgen', '-N1']).strip().decode('utf-8')
        repl_pass = subprocess.check_output(['pwgen', '-N1']).strip().decode('utf-8')
        leader_set(passwords=json.dumps({'admin_pass': admin_pass, 'repl_pass': repl_pass}))
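
A short sketch of the matching read side, assuming the same 'passwords' key:
units decode the JSON blob the leader stored.

import json

from charmhelpers.core.hookenv import leader_get


def _get_passwords():
    # Hypothetical accessor for the leader-generated credentials.
    raw = leader_get('passwords')
    return json.loads(raw) if raw else {}
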
Example #14
def consume_munge_key(munge_consumer):
    '''Consume a munge key when a relation to a provider has been made via
    the consumer interface, regardless of how the key was generated.
    Store it in leader settings to propagate it to the other units.'''
    munge_key = munge_consumer.munge_key
    # do not do anything unless there is actually a key available
    # otherwise, keep using whatever was there before
    if munge_key:
        leadership.leader_set(munge_key=munge_key)
    flags.clear_flag('endpoint.munge-consumer.munge_key_updated')
def enable_ha(cluster):
    """
    Once we have two viable NameNodes and a quorum of JournalNodes,
    inform all cluster units that we are HA.

    Note that this flag is never removed (once HA, always HA) because we
    could temporarily lose JN quorum or viable NNs during fail-over or
    restart, and we don't want to arbitrarily revert to non-HA in that case.
    """
    leadership.leader_set({'namenode-ha': 'true'})
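
A minimal peer-side sketch (assumed, not from the HDFS charm): the
charms.leadership layer mirrors leader data into 'leadership.set.<key>' flags,
so every unit can key a handler off the announcement above.

from charms import reactive
from charmhelpers.core import hookenv


@reactive.when('leadership.set.namenode-ha')
def note_ha_enabled():
    # Hypothetical handler: once HA is announced it never reverts, so peers
    # can safely reconfigure themselves for HA here.
    hookenv.log('namenode-ha is set; cluster is running in HA mode')
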
Example #16
    def configure_instance(self, address):
        """Configure MySQL instance for clustering.

        :param self: Self
        :type self: MySQLInnoDBClusterCharm instance
        :param address: Address of the MySQL instance to be configured
        :type address: str
        :side effect: Executes MySQL Shell script to configure the instance for
                      clustering
        :returns: This function is called for its side effect
        :rtype: None
        """
        if reactive.is_flag_set(
                "leadership.set.cluster-instance-configured-{}".format(
                    address)):
            ch_core.hookenv.log(
                "Instance: {}, already configured.".format(address), "WARNING")
            return

        ch_core.hookenv.log(
            "Configuring instance for clustering: {}.".format(address), "INFO")
        _script_template = """
        dba.configureInstance('{}:{}@{}');
        var myshell = shell.connect('{}:{}@{}');
        myshell.runSql("RESTART;");
        """

        with tempfile.NamedTemporaryFile(mode="w", suffix=".js") as _script:
            _script.write(
                _script_template.format(self.cluster_user,
                                        self.cluster_password, address,
                                        self.cluster_user,
                                        self.cluster_password, address))
            _script.flush()

            cmd = ([self.mysqlsh_bin, "--no-wizard", "-f", _script.name])
            try:
                output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                ch_core.hookenv.log(
                    "Failed configuring instance {}: {}".format(
                        address, e.output.decode("UTF-8")), "ERROR")
                return

        # After configuration of the remote instance, the remote instance
        # restarts mysql. We need to pause here for that to complete.
        self._wait_until_connectable(username=self.cluster_user,
                                     password=self.cluster_password,
                                     address=address)

        ch_core.hookenv.log("Instance Configured {}: {}".format(
            address, output.decode("UTF-8")),
                            level="DEBUG")
        leadership.leader_set(
            {"cluster-instance-configured-{}".format(address): True})
Example #17
def nuke_certs():
    ''' If the certificate relation is broken, we need to forget about
    our certificates and wait for new ones. '''
    hookenv.status_set('maintenance',
                       'Removing until certificate relation established')
    is_leader = is_flag_set('leadership.is_leader')
    if is_leader:
        leader_set({'cert': None, 'key': None})
        _remove_certificate()
        _remove_deployment()
    _remove_webhook()
Example #18
def obtain_munge_key(*args):
    # get flags
    munge_key = hookenv.config().get('munge_key')
    # Generate a munge key if it has not been provided via charm config
    if not munge_key:
        hookenv.log(
            'obtain_munge_key(): No key in charm config, generating new key')
        munge_key = host.pwgen(length=4096)
    else:
        hookenv.log('obtain_munge_key(): Using key from charm config')
    leadership.leader_set(munge_key=munge_key)
Example #19
    def add_instance_to_cluster(self, address):
        """Add MySQL instance to the cluster.

        :param self: Self
        :type self: MySQLInnoDBClusterCharm instance
        :param address: Address of the MySQL instance to be configured
        :type address: str
        :side effect: Executes MySQL Shell script to add the MySQL instance to
                      the cluster
        :returns: This function is called for its side effect
        :rtype: None
        """
        if reactive.is_flag_set(
                "leadership.set.cluster-instance-clustered-{}".format(
                    address)):
            ch_core.hookenv.log(
                "Instance: {}, already clustered.".format(address), "WARNING")
            return

        ch_core.hookenv.log(
            "Adding instance, {}, to the cluster.".format(address), "INFO")
        _script_template = """
        shell.connect("{}:{}@{}")
        var cluster = dba.getCluster("{}");

        print("Adding instances to the cluster.");
        cluster.addInstance(
            {{user: "******", host: "{}", password: "******", port: "3306"}},
            {{recoveryMethod: "clone"}});
        """

        with tempfile.NamedTemporaryFile(mode="w", suffix=".js") as _script:
            _script.write(
                _script_template.format(self.cluster_user,
                                        self.cluster_password,
                                        self.cluster_address,
                                        self.options.cluster_name,
                                        self.cluster_user, address,
                                        self.cluster_password))
            _script.flush()

            cmd = ([self.mysqlsh_bin, "--no-wizard", "-f", _script.name])
            try:
                output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                ch_core.hookenv.log(
                    "Failed adding instance {} to cluster: {}".format(
                        address, e.output.decode("UTF-8")), "ERROR")
                return
        ch_core.hookenv.log("Instance Clustered {}: {}".format(
            address, output.decode("UTF-8")),
                            level="DEBUG")
        leadership.leader_set(
            {"cluster-instance-clustered-{}".format(address): True})
Example #20
def write_cert_to_leadership_data():
    cert_ep = endpoint_from_flag('certificates.certs.available')
    my_cert = cert_ep.server_certs_map[_get_cert_common_name()]

    leader_set({'cert': my_cert.cert, 'key': my_cert.key})
    # we also use this time to generate the cluster id
    if not leader_get('cluster_id'):
        cluster_id = ''.join(
            random.choice(string.ascii_letters + string.digits)
            for i in range(24))
        leader_set({'cluster_id': cluster_id})
def announce_leader_ready():
    """Announce leader is ready.

    At this point ovn-ctl has taken care of initialization of OVSDB databases
    and OVSDB servers for the Northbound- and Southbound- databases are
    running.

    Signal to our peers that they should render configurations and start their
    database processes.
    """
    # FIXME use the OVSDB cluster and/or server IDs here?
    leadership.leader_set({'ready': True})
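
A hedged peer-side sketch (an assumption, not from the OVN charm): the datum
above surfaces as the 'leadership.set.ready' flag, which non-leader units can
use to gate rendering and starting their own database processes.

from charms import reactive
from charmhelpers.core import hookenv


@reactive.when('leadership.set.ready')
def leader_announced_ready():
    # Hypothetical handler body; the real charm would render configs and
    # start its OVSDB servers at this point.
    hookenv.log('leader reports the OVSDB databases are initialized')
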
def process_snapd_timer():
    ''' Set the snapd refresh timer on the leader so all cluster members
    (present and future) will refresh near the same time. '''
    # Get the current snapd refresh timer; we know layer-snap has set this
    # when the 'snap.refresh.set' flag is present.
    timer = snap.get(snapname='core', key='refresh.timer').decode('utf-8')

    # The first time through, data_changed will be true. Subsequent calls
    # should only update leader data if something changed.
    if data_changed('worker_snapd_refresh', timer):
        hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
        leader_set({'snapd_refresh': timer})
Example #23
def upgrade_charm():
    remove_state('calico.binaries.installed')
    remove_state('calico.service.installed')
    remove_state('calico.pool.configured')
    remove_state('calico.image.pulled')
    remove_state('calico.npc.deployed')
    if is_leader() and not leader_get('calico-v3-data-ready'):
        leader_set({
            'calico-v3-data-migration-needed': True,
            'calico-v3-npc-cleanup-needed': True,
            'calico-v3-completion-needed': True
        })
Example #24
def upgrade_v3_migrate_data():
    status.maintenance('Migrating data to Calico 3')
    try:
        calico_upgrade.configure()
        calico_upgrade.dry_run()
        calico_upgrade.start()
    except Exception:
        log(traceback.format_exc())
        message = 'Calico upgrade failed, see debug log'
        status.blocked(message)
        return
    leader_set({'calico-v3-data-migration-needed': None})
Example #25
def upgrade_v3_complete():
    status.maintenance('Completing Calico 3 upgrade')
    try:
        calico_upgrade.configure()
        calico_upgrade.complete()
        calico_upgrade.cleanup()
    except Exception:
        log(traceback.format_exc())
        message = 'Calico upgrade failed, see debug log'
        status.blocked(message)
        return
    leader_set({'calico-v3-completion-needed': None})
Example #26
def check_cluster(zkpeer):
    '''
    Checkup on the state of the cluster. Start a rolling restart if
    the peers have changed.

    '''
    zk = Zookeeper()
    if data_changed('zkpeer.nodes', zk.read_peers()):
        peers = _ip_list(zk.sort_peers(zkpeer))
        nonce = time.time()
        hookenv.log('Quorum changed. Restart queue: {}'.format(peers))
        leader_set(restart_queue=json.dumps(peers),
                   restart_nonce=json.dumps(nonce))
Example #27
def create_certificate_authority():
    '''Return the CA and server certificates for this system. If the CA is
    empty, generate a self signed certificate authority.'''
    with chdir(easyrsa_directory):
        # The Common Name (CN) for a certificate must be an IP or hostname.
        cn = hookenv.unit_public_ip()
        # Create a self signed CA with the CN, stored pki/ca.crt
        build_ca = './easyrsa --batch "--req-cn={0}" build-ca nopass 2>&1'
        # Build a self signed Certificate Authority.
        check_call(split(build_ca.format(cn)))

        ca_file = 'pki/ca.crt'
        # Read the CA so it can be returned in leader data.
        with open(ca_file, 'r') as stream:
            certificate_authority = stream.read()

        key_file = 'pki/private/ca.key'
        # Read the private key so it can be set in leader data.
        with open(key_file, 'r') as stream:
            ca_key = stream.read()

        # Set these values on the leadership data.
        leader_set({'certificate_authority': certificate_authority})
        leader_set({'certificate_authority_key': ca_key})
        # Install the CA on this system as a trusted CA.
        install_ca(certificate_authority)
        # Create a client certificate for this CA.
        client_cert, client_key = create_client_certificate()
        # Set the client certificate and key on leadership data.
        leader_set({'client_certificate': client_cert})
        leader_set({'client_key': client_key})
        status_set('active', 'Certificate Authority available')
    set_state('easyrsa.certificate.authority.available')
Example #29
def set_temp_url_secret(*args):
    """Set Temp-Url-Key on storage account"""
    if not reactive.is_flag_set('leadership.is_leader'):
        return ch_core.hookenv.action_fail('action must be run on the leader '
                                           'unit.')
    if not reactive.is_flag_set('config.complete'):
        return ch_core.hookenv.action_fail('required relations are not yet '
                                           'available, please defer action '
                                           'until deployment is complete.')
    identity_service = reactive.endpoint_from_flag(
        'identity-credentials.available')
    try:
        keystone_session = api_utils.create_keystone_session(identity_service)
    except Exception as e:
        ch_core.hookenv.action_fail(
            'Failed to create keystone session ("{}")'.format(e))
        return

    os_cli = api_utils.OSClients(keystone_session)
    if os_cli.has_swift() is False:
        ch_core.hookenv.action_fail(
            'Swift not yet available. Please wait for deployment to finish')
        return

    if os_cli.has_glance() is False:
        ch_core.hookenv.action_fail(
            'Glance not yet available. Please wait for deployment to finish')
        return

    if "swift" not in os_cli.glance_stores:
        ch_core.hookenv.action_fail(
            'Glance does not support Swift storage backend. '
            'Please add relation between glance and ceph-radosgw/swift')
        return

    current_secret = leadership.leader_get("temp_url_secret")
    current_swift_secret = os_cli.get_object_account_properties().get(
        'temp-url-key', None)

    if not current_secret or current_swift_secret != current_secret:
        secret = hashlib.sha1(str(uuid.uuid4()).encode()).hexdigest()
        os_cli.set_object_account_property("temp-url-key", secret)
        leadership.leader_set({"temp_url_secret": secret})
        # render configs on leader, and assess status. Every other unit
        # will render theirs when leader-settings-changed executes.
        shared_db = reactive.endpoint_from_flag('shared-db.available')
        ironic_api = reactive.endpoint_from_flag('ironic-api.available')
        amqp = reactive.endpoint_from_flag('amqp.available')

        with charm.provide_charm_instance() as ironic_charm:
            ironic_charm.render_with_interfaces(
                charm.optional_interfaces(
                    (identity_service, shared_db, ironic_api, amqp)))
            ironic_charm._assess_status()
Example #30
def _maybe_generate_passwords():
    """
    If the leader hasn't generated passwords yet, generate them.

    """
    if not leader_get('passwords'):
        admin_pass = subprocess.check_output(['pwgen',
                                              '-N1']).strip().decode('utf-8')
        repl_pass = subprocess.check_output(['pwgen',
                                             '-N1']).strip().decode('utf-8')
        leader_set(passwords=json.dumps({
            'admin_pass': admin_pass,
            'repl_pass': repl_pass
        }))
Example #31
def config_changed():
    hookenv.log('config_changed(): leader detected charm config change')
    charmconf = hookenv.config()
    # We are only interested if the munge_key has changed
    if charmconf.changed('munge_key'):
        hookenv.log('config_changed(): munge key has changed')
        prev_key = charmconf.previous('munge_key')
        munge_key = charmconf['munge_key']
        hookenv.log('config_changed(): previous key: %s' % prev_key)
        hookenv.log('config_changed(): new key: %s' % munge_key)
        leadership.leader_set(munge_key=munge_key)
        # clear munge.configured and munge.exposed
        flags.clear_flag('munge.configured')
        flags.clear_flag('munge.exposed')
        flags.set_flag('munge.changed_key_file')
Example #32
def setProperties():
    startDCOS()
    if os.path.isfile('/var/lib/dcos/cluster-id'):
        cluster_id_path = '/var/lib/dcos/cluster-id'
    else:
        cluster_id_path = '/var/lib/dcos/cluster-id.tmp'
    with open(cluster_id_path) as text_file:
        cid = text_file.read()
    with open('/var/lib/dcos/auth-token-secret') as text_file:
        ats = text_file.read()
    leader_set(cluster=cid)
    leader_set(authtoken=ats)
    set_state('dcos-master.running')
    status_set('active', 'DC/OS started')
Example #33
def check_cluster(zkpeer):
    '''
    Checkup on the state of the cluster. Start a rolling restart if
    the peers have changed.

    '''
    zk = Zookeeper()
    if data_changed('zkpeer.nodes', zk.read_peers()):
        peers = _ip_list(zk.sort_peers(zkpeer))
        nonce = time.time()
        hookenv.log('Quorum changed. Restart queue: {}'.format(peers))
        leader_set(
            restart_queue=json.dumps(peers),
            restart_nonce=json.dumps(nonce)
        )
Example #34
def prepare_default_client_credentials():
    """ Generate a downloadable package for clients to use to speak to the
    swarm cluster. """

    # Leverage TLSLib to copy the default cert from PKI
    client_cert(None, "./swarm_credentials/cert.pem")
    client_key(None, "./swarm_credentials/key.pem")
    ca(None, "./swarm_credentials/ca.pem")

    with open("swarm_credentials/key.pem", "r") as fp:
        key_contents = fp.read()
    with open("swarm_credentials/cert.pem", "r") as fp:
        crt_contents = fp.read()

    leader_set({"client_cert": crt_contents, "client_key": key_contents})
Example #35
def prepare_default_client_credentials():
    """ Generate a downloadable package for clients to use to speak to the
    swarm cluster. """

    # Leverage TLSLib to copy the default cert from PKI
    client_cert(None, './swarm_credentials/cert.pem')
    client_key(None, './swarm_credentials/key.pem')
    ca(None, './swarm_credentials/ca.pem')

    with open('swarm_credentials/key.pem', 'r') as fp:
        key_contents = fp.read()
    with open('swarm_credentials/cert.pem', 'r') as fp:
        crt_contents = fp.read()

    leader_set({'client_cert': crt_contents, 'client_key': key_contents})
Example #36
def update_restart_queue(zkpeer):
    '''
    If a Zookeeper node has restarted as part of a rolling restart,
    pop it off of the queue.

    '''
    queue = json.loads(leader_get('restart_queue') or '[]')
    if not queue:
        return

    restarted_nodes = _ip_list(zkpeer.restarted_nodes())
    new_queue = [node for node in queue if node not in restarted_nodes]

    if new_queue != queue:
        hookenv.log('Leader updating restart queue: {}'.format(new_queue))
        leader_set(restart_queue=json.dumps(new_queue))
Example #37
def update_controller_ip_port_list():
    """Load state from Neutron and update ``controller-ip-port-list``."""
    identity_service = reactive.endpoint_from_flag(
        'identity-service.available')
    leader_ip_list = json.loads(
        leadership.leader_get('controller-ip-port-list') or '[]')

    try:
        neutron_ip_list = sorted(api_crud.get_port_ips(identity_service))
    except api_crud.APIUnavailable as e:
        ch_core.hookenv.log('Neutron API not available yet, deferring '
                            'port discovery. ("{}")'.format(e),
                            level=ch_core.hookenv.DEBUG)
        return
    if neutron_ip_list != sorted(leader_ip_list):
        leadership.leader_set(
            {'controller-ip-port-list': json.dumps(neutron_ip_list)})
Example #38
def reset_default_password():
    # We need a big timeout here, as the cassandra user actually
    # springs into existence some time after Cassandra has started
    # up and is accepting connections.
    with cassandra.connect('cassandra', 'cassandra', timeout=180) as session:
        # But before we close this security hole, we need to use these
        # credentials to create a different admin account.
        helpers.status_set('maintenance', 'Creating initial superuser account')
        username, password = '******', host.pwgen()
        pwhash = cassandra.encrypt_password(password)
        cassandra.ensure_user(session, username, pwhash, superuser=True)
        leadership.leader_set(username=username, password=password)
        helpers.status_set('maintenance', 'Changing default admin password')
        cassandra.query(session, 'ALTER USER cassandra WITH PASSWORD %s',
                        ConsistencyLevel.ALL, (host.pwgen(),))
    hookenv.leader_set(default_admin_password_changed=True)
def init_ha_active(datanode, cluster):
    """
    Do initial HA setup on the leader.
    """
    local_hostname = hookenv.local_unit().replace('/', '-')
    hadoop = get_hadoop_base()
    hdfs = HDFS(hadoop)
    hdfs.stop_namenode()
    remove_state('namenode.started')
    # initial cluster is us (active) plus a standby
    set_cluster_nodes([local_hostname, cluster.nodes()[0]])
    update_ha_config(datanode)
    hdfs.init_sharededits()
    hdfs.start_namenode()
    leadership.leader_set({'ha-initialized': 'true'})
    set_state('namenode.started')
Example #40
def update_seeds():
    seed_ips = cassandra.get_seed_ips()
    hookenv.log('Current seeds: {!r}'.format(seed_ips), DEBUG)

    bootstrapped_ips = get_bootstrapped_ips()
    hookenv.log('Bootstrapped: {!r}'.format(bootstrapped_ips), DEBUG)

    # Remove any seeds that are no longer bootstrapped, such as dropped
    # units.
    seed_ips.intersection_update(bootstrapped_ips)

    # Add more bootstrapped nodes, if necessary, to get to our maximum
    # of 3 seeds.
    for ip in bootstrapped_ips:
        if len(seed_ips) >= 3:
            break
        seed_ips.add(ip)

    hookenv.log('Updated seeds: {!r}'.format(seed_ips), DEBUG)
    leadership.leader_set(seeds=','.join(sorted(seed_ips)))
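
A hedged guess at what a reader such as cassandra.get_seed_ips() could look
like, assuming the same comma-separated 'seeds' key; the real helper in the
Cassandra charm may differ.

from charmhelpers.core.hookenv import leader_get


def get_seed_ips():
    # Hypothetical accessor mirroring leadership.leader_set(seeds=...) above.
    raw = leader_get('seeds') or ''
    return set(ip for ip in raw.split(',') if ip)
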
Example #41
def etcd_data_change(etcd):
    ''' Etcd scale events block master reconfiguration due to the
        kubernetes-master.components.started state. We need a way to
        handle these events consistently, and only when the number of etcd
        units has actually changed. '''

    # key off of the connection string
    connection_string = etcd.get_connection_string()

    # If the connection string changes, remove the started state to trigger
    # handling of the master components
    if data_changed('etcd-connect', connection_string):
        remove_state('kubernetes-master.components.started')

    # We are the leader and the etcd_version is not set meaning
    # this is the first time we connect to etcd.
    if is_state('leadership.is_leader') and not leader_get('etcd_version'):
        if etcd.get_version().startswith('3.'):
            leader_set(etcd_version='etcd3')
        else:
            leader_set(etcd_version='etcd2')
Example #42
def setup_leader_authentication():
    '''Setup basic authentication and token access for the cluster.'''
    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    keys = [service_key, basic_auth, known_tokens]
    # Try first to fetch data from an old leadership broadcast.
    if not get_keys_from_leader(keys) \
            or is_state('reconfigure.authentication.setup'):
        last_pass = get_password('basic_auth.csv', 'admin')
        setup_basic_auth(last_pass, 'admin', 'admin', 'system:masters')

        if not os.path.isfile(known_tokens):
            touch(known_tokens)

        # Generate the default service account token key
        os.makedirs('/root/cdk', exist_ok=True)
        if not os.path.isfile(service_key):
            cmd = ['openssl', 'genrsa', '-out', service_key,
                   '2048']
            check_call(cmd)
        remove_state('reconfigure.authentication.setup')

    # read service account key for syndication
    leader_data = {}
    for f in [known_tokens, basic_auth, service_key]:
        with open(f, 'r') as fp:
            leader_data[f] = fp.read()

    # this is slightly opaque, but we are sending file contents under its file
    # path as a key.
    # eg:
    # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
    leader_set(leader_data)
    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')
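
The broadcast above is consumed on the peers by get_keys_from_leader(); a
hedged sketch of what such a helper might look like (the real implementation
in the kubernetes-master charm may differ):

import os

from charmhelpers.core.hookenv import leader_get


def get_keys_from_leader(keys):
    """Hypothetical: write previously broadcast file contents back to disk.

    Returns False when any key has not been published yet.
    """
    for path in keys:
        contents = leader_get(path)
        if not contents:
            return False
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, 'w') as fp:
            fp.write(contents)
    return True
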
Example #43
def restart_for_quorum(zkpeer):
    '''
    If we're the next node in the restart queue, restart, and then
    inform the leader that we've restarted. (If we are the leader,
    remove ourselves from the queue, and update the leadership data.)

    '''
    private_address = hookenv.unit_get('private-address')
    queue = json.loads(leader_get('restart_queue') or '[]')

    if not queue:
        # Everything has restarted.
        return

    if private_address == queue[0]:
        # It's our turn to restart.
        _restart_zookeeper('rolling restart for quorum update')
        if is_state('leadership.is_leader'):
            queue = queue[1:]
            hookenv.log('Leader updating restart queue: {}'.format(queue))
            leader_set(restart_queue=json.dumps(queue))
        else:
            zkpeer.inform_restart()
Example #44
def publish_credentials(rel, superuser):
    pub = rel.to_publish_raw
    config = cassandra.config()
    if config['authenticator'].lower() == 'allowallauthenticator':
        if 'username' in pub:
            del pub['username']
            del pub['password']
        return
    if 'username' in pub:
        hookenv.log("Credentials for {} ({}) already published".format(rel.application_name, rel.relation_id))
        return
    hookenv.log("Publishing credentials for {} ({})".format(rel.application_name, rel.relation_id))
    assert rel.application_name, 'charms.reactive Relation failed to provide application_name property'
    username = '******'.format(rel.application_name)
    if superuser:
        username += '_admin'
    password = host.pwgen()
    pwhash = cassandra.encrypt_password(password)
    with cassandra.connect() as session:
        cassandra.ensure_user(session, username, pwhash, superuser)
    pub['username'] = username
    pub['password'] = password
    # Notify peers there are new credentials to be found.
    leadership.leader_set(client_rel_source=hookenv.local_unit(), client_rel_ping=str(time.time()))
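
A hedged sketch of the peer side (assumed, not from the Cassandra charm): the
ping value only exists so that 'leadership.changed.client_rel_ping' fires on
every unit, prompting it to refresh its own copy of the relation data.

from charms import reactive


@reactive.when('leadership.changed.client_rel_ping')
def client_credentials_changed():
    # Hypothetical reaction: clear a local flag (name assumed) so this unit
    # republishes its client relation data on the next dispatch.
    reactive.clear_flag('cassandra.client.published')
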
Example #45
def check_for_upgrade_needed():
    '''An upgrade charm event was triggered by Juju, react to that here.'''
    hookenv.status_set('maintenance', 'Checking resources')

    migrate_from_pre_snaps()
    add_rbac_roles()
    set_state('reconfigure.authentication.setup')
    remove_state('authentication.setup')
    changed = snap_resources_changed()
    if changed == 'yes':
        set_upgrade_needed()
    elif changed == 'unknown':
        # We are here on an upgrade from a non-rolling master.
        # Since this upgrade might also include resource updates, e.g.
        # juju upgrade-charm kubernetes-master --resource kube-any=my.snap
        # we take no risk and forcibly upgrade the snaps.
        # Forcibly means we do not prompt the user to call the upgrade action.
        set_upgrade_needed(forced=True)

    # Set the auto storage backend to etcd2.
    auto_storage_backend = leader_get('auto_storage_backend')
    is_leader = is_state('leadership.is_leader')
    if not auto_storage_backend and is_leader:
        leader_set(auto_storage_backend='etcd2')
Example #46
def open_swarm_manager_port():
    open_port(3376)
    # Tell the followers where to connect to the manager for internal
    # operations.
    leader_set({"swarm_manager": "tcp://{}:3376".format(unit_private_ip())})
Example #47
def ensure_nagios_credentials():
    leadership.leader_set(nagios_password=host.pwgen())
def generate_ssh_key():
    utils.generate_ssh_key('hdfs')
    leadership.leader_set({
        'ssh-key-priv': utils.ssh_priv_key('hdfs').text(),
        'ssh-key-pub': utils.ssh_pub_key('hdfs').text(),
    })
Example #49
def upgrade_for_etcd():
    # we are upgrading the charm.
    # If this is an old deployment etcd_version is not set
    # so if we are the leader we need to set it to v2
    if not leader_get('etcd_version') and is_state('leadership.is_leader'):
        leader_set(etcd_version='etcd2')
Example #50
def upgrade_credentials():
    '''Migrate credentials from pre-reactive charm deployment'''
    username = '******'.format(re.subn(r'\W', '_', hookenv.local_unit())[0])
    username, password = cassandra.get_cqlshrc_credentials(username)
    if username is not None and password is not None:
        leadership.leader_set(username=username, password=password)
Example #51
def initial_seeds():
    leadership.leader_set(seeds=cassandra.listen_ip_address())
    reactive.set_flag('cassandra.seeds.done')
def set_cluster_nodes(nodes):
    leadership.leader_set({
        'cluster-nodes': json.dumps(sorted(nodes)),
    })
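
A short sketch of the matching reader, assuming the same 'cluster-nodes' key:

import json

from charms import leadership


def get_cluster_nodes():
    # Hypothetical accessor mirroring set_cluster_nodes() above.
    return json.loads(leadership.leader_get('cluster-nodes') or '[]')
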
Example #53
def send_fqdn():
    spark_master_host = get_fqdn()
    leadership.leader_set({'master-fqdn': spark_master_host})
    hookenv.log("Setting leader to {}".format(spark_master_host))
def format_zookeeper(zookeeper):
    update_zk_config(zookeeper)  # ensure config is up to date
    hadoop = get_hadoop_base()
    hdfs = HDFS(hadoop)
    hdfs.format_zookeeper()
    leadership.leader_set({'zk-formatted': 'true'})