Example #1
def client_cert(source, destination, user=None, group=None):
    """
    Copy the client certificate to the destination creating directories if
    needed and assign ownership if set.

    :param string source: The path to look for the certificate, if None
    the certificate will be copied from the default location.
    :param string destination: The path to save the certificate.
    :param string user: The optional name of the user to own the certificate.
    :param string group: The optional name of the group to own certificate.
    """
    _ensure_directory(destination, user, group)

    if not source:
        # When source not specified use the default client certificate path.
        source = os.path.join(charm_dir(),
                              'easy-rsa/easyrsa3/pki/issued/client.crt')

    # Check for the client certificate.
    if os.path.isfile(source):
        # Copy the client certificate to the destination.
        copy2(source, destination)
    else:
        # No client certificate file, get the value from unit data.
        client_cert_key = 'tls.client.certificate'
        # Save the certificate data to the destination.
        _save_unitdata(client_cert_key, destination)

    chown(destination, user, group)

    # Set the destination path for the client certificate path on the unitdata.
    unitdata.kv().set('client-cert-path', destination)
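A minimal usage sketch of the helper above (the destination path and owners are hypothetical; client_cert and unitdata are assumed to be imported from this layer):

# Hypothetical call: copy the default client certificate into place and
# record where it landed so other handlers can look the path up later.
client_cert(None, '/srv/kubernetes/client.crt', user='root', group='root')
cert_path = unitdata.kv().get('client-cert-path')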
Example #2
    def test_check_reverse_dns(self, mock_hookenv, mock_utils,
                               mock_sub, mock_socket):
        '''
        Verify that we set the reverse_dns_ok state, and handle errors
        correctly.

        '''
        # Test the case where things succeed.
        mock_sub.check_output.return_value = b'domain'
        self.bigtop.check_reverse_dns()
        self.assertTrue(unitdata.kv().get('reverse_dns_ok'))

        # Test the case where the reverse lookup returns 'localdomain'.
        mock_sub.check_output.return_value = b'localdomain'
        self.bigtop.check_reverse_dns()
        self.assertFalse(unitdata.kv().get('reverse_dns_ok'))

        # Test the case where the reverse lookup raises an exception.
        class MockHError(Exception):
            pass

        def raise_herror(*args, **kwargs):
            raise MockHError('test')
        mock_socket.herror = MockHError
        mock_socket.gethostbyaddr = raise_herror

        self.bigtop.check_reverse_dns()
        self.assertFalse(unitdata.kv().get('reverse_dns_ok'))
Example #3
def send_info(datanode):
    hadoop = get_hadoop_base()
    hdfs = HDFS(hadoop)
    local_hostname = hookenv.local_unit().replace("/", "-")
    hdfs_port = hadoop.dist_config.port("namenode")
    webhdfs_port = hadoop.dist_config.port("nn_webapp_http")

    utils.update_kv_hosts({node["ip"]: node["host"] for node in datanode.nodes()})
    utils.manage_etc_hosts()

    datanode.send_spec(hadoop.spec())
    datanode.send_namenodes([local_hostname])
    datanode.send_ports(hdfs_port, webhdfs_port)
    datanode.send_ssh_key(utils.get_ssh_key("hdfs"))
    datanode.send_hosts_map(utils.get_kv_hosts())

    slaves = [node["host"] for node in datanode.nodes()]
    if data_changed("namenode.slaves", slaves):
        unitdata.kv().set("namenode.slaves", slaves)
        hdfs.register_slaves(slaves)

    hookenv.status_set(
        "active", "Ready ({count} DataNode{s})".format(count=len(slaves), s="s" if len(slaves) > 1 else "")
    )
    set_state("namenode.ready")
Example #4
    def install_hadoop(self):
        hadoop_version = self.dist_config.hadoop_version
        try:
            jujuresources.install('hadoop-%s-%s' %
                                  (hadoop_version,
                                   self.cpu_arch),
                                  destination=self.dist_config.path('hadoop'),
                                  skip_top_level=True)
        except KeyError:
            hookenv.log("Falling back to non-version specific download of hadoop...")
            jujuresources.install('hadoop-%s' %
                                  (self.cpu_arch),
                                  destination=self.dist_config.path('hadoop'),
                                  skip_top_level=True)

        # Install our lzo compression codec if it's defined in resources.yaml
        try:
            jujuresources.install('hadoop-lzo-%s' % self.cpu_arch,
                                  destination=self.dist_config.path('hadoop'),
                                  skip_top_level=False)
            unitdata.kv().set('hadoop.lzo.installed', True)
        except KeyError:
            msg = ("The hadoop-lzo-%s resource was not found."
                   "LZO compression will not be available." % self.cpu_arch)
            hookenv.log(msg)
Example #5
def remove_state(state):
    """Remove / deactivate a state"""
    old_states = get_states()
    unitdata.kv().unset('reactive.states.%s' % state)
    unitdata.kv().set('reactive.dispatch.removed_state', True)
    if state in old_states:
        StateWatch.change(state)
Example #6
def initialize_leadership_state():
    '''Initialize leadership.* states from the hook environment.

    Invoked by hookenv.atstart() so states are available in
    @hook decorated handlers.
    '''
    is_leader = hookenv.is_leader()
    if is_leader:
        hookenv.log('Initializing Leadership Layer (is leader)')
    else:
        hookenv.log('Initializing Leadership Layer (is follower)')

    reactive.helpers.toggle_state('leadership.is_leader', is_leader)

    previous = unitdata.kv().getrange('leadership.settings.', strip=True)
    current = hookenv.leader_get()

    # Handle deletions.
    for key in set(previous.keys()) - set(current.keys()):
        current[key] = None

    any_changed = False
    for key, value in current.items():
        reactive.helpers.toggle_state('leadership.changed.{}'.format(key),
                                      value != previous.get(key))
        if value != previous.get(key):
            any_changed = True
        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
                                      value is not None)
    reactive.helpers.toggle_state('leadership.changed', any_changed)

    unitdata.kv().update(current, prefix='leadership.settings.')
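A minimal sketch of a handler consuming the states toggled above (the handler name and setting key are hypothetical):

from charms.reactive import when
from charmhelpers.core import hookenv, unitdata

@when('leadership.changed.db-password')
def propagate_db_password():
    # Read the mirrored leader setting that initialize_leadership_state()
    # copied into unitdata under the 'leadership.settings.' prefix.
    password = unitdata.kv().get('leadership.settings.db-password')
    hookenv.log('leader db-password changed; reconfiguring clients')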
Example #7
    def set_local(self, key=None, value=None, data=None, **kwdata):
        """
        Locally store some data associated with this conversation.

        Data can be passed in either as a single dict, or as key-word args.

        For example, if you need to store the previous value of a remote field
        to determine if it has changed, you can use the following::

            prev = conversation.get_local('field')
            curr = conversation.get_remote('field')
            if prev != curr:
                handle_change(prev, curr)
                conversation.set_local('field', curr)

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.

        :param str key: The name of a field to set.
        :param value: A value to set.
        :param dict data: A mapping of keys to values.
        :param \*\*kwdata: A mapping of keys to values, as keyword arguments.
        """
        if data is None:
            data = {}
        if key is not None:
            data[key] = value
        data.update(kwdata)
        if not data:
            return
        unitdata.kv().update(data, prefix='%s.%s.' % (self.key, 'local-data'))
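A short usage sketch of set_local() with keyword data, following the pattern in the docstring (assumes a conversation object in scope and the usual hookenv/unitdata imports; field names are hypothetical):

# Remember what we last published so we only re-send on a real change.
prev = conversation.get_local('hostname')
curr = hookenv.local_unit().replace('/', '-')
if prev != curr:
    conversation.set_local(hostname=curr, port=8080)
    unitdata.kv().flush()  # set_local() relies on an explicit flush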
Example #8
    def configure_remote_db(self, mysql):
        hive_site = self.dist_config.path('hive_conf') / 'hive-site.xml'
        jdbc_url = \
            "jdbc:mysql://{}:{}/{}?createDatabaseIfNotExist=true".format(
                mysql.host(), mysql.port(), mysql.database()
            )
        with utils.xmlpropmap_edit_in_place(hive_site) as props:
            props['javax.jdo.option.ConnectionURL'] = jdbc_url
            props['javax.jdo.option.ConnectionUserName'] = mysql.user()
            props['javax.jdo.option.ConnectionPassword'] = mysql.password()
            props['javax.jdo.option.ConnectionDriverName'] = \
                "com.mysql.jdbc.Driver"

        hive_env = self.dist_config.path('hive_conf') / 'hive-env.sh'
        utils.re_edit_in_place(hive_env, {
            r'.*export HIVE_AUX_JARS_PATH *=.*':
            ('export HIVE_AUX_JARS_PATH='
             '/usr/share/java/mysql-connector-java.jar'),
        })

        # Now that we have db connection info, init our schema (only once)
        remote_db = hookenv.remote_service_name()
        if not unitdata.kv().get('hive.schema.initialized.%s' % remote_db):
            tool_path = "{}/bin/schematool".format(
                self.dist_config.path('hive'))
            utils.run_as(
                'ubuntu', tool_path, '-initSchema', '-dbType', 'mysql')
            unitdata.kv().set('hive.schema.initialized.%s' % remote_db, True)
            unitdata.kv().flush(True)
Example #9
def send_info(nodemanager):
    hadoop = get_hadoop_base()
    yarn = YARN(hadoop)
    local_hostname = hookenv.local_unit().replace('/', '-')
    port = hadoop.dist_config.port('resourcemanager')
    hs_http = hadoop.dist_config.port('jh_webapp_http')
    hs_ipc = hadoop.dist_config.port('jobhistory')

    utils.update_kv_hosts(nodemanager.hosts_map())
    utils.manage_etc_hosts()

    nodemanager.send_spec(hadoop.spec())
    nodemanager.send_resourcemanagers([local_hostname])
    nodemanager.send_ports(port, hs_http, hs_ipc)
    nodemanager.send_ssh_key(utils.get_ssh_key('yarn'))
    nodemanager.send_hosts_map(utils.get_kv_hosts())

    slaves = nodemanager.nodes()
    if data_changed('resourcemanager.slaves', slaves):
        unitdata.kv().set('resourcemanager.slaves', slaves)
        yarn.register_slaves(slaves)

    hookenv.status_set('active', 'Ready ({count} NodeManager{s})'.format(
        count=len(slaves),
        s='s' if len(slaves) > 1 else '',
    ))
    set_state('resourcemanager.ready')
Example #10
def server_key(source, destination, user=None, group=None):
    """
    Copy the server key to the destination, creating directories if needed and
    assign ownership if set.

    :param string source: The directory to look for the key, if None the key
    will be copied from default location.
    :param string destination: The path to save the key.
    :param string user: The optional name of the user to own the key.
    :param string group: The optional name of the group to own key.
    """
    _ensure_directory(destination, user, group)

    if not source:
        # Must remove the path characters from the local unit name.
        key_name = local_unit().replace('/', '_')
        # The location of server key is easy-rsa/easyrsa3/pki/private
        source = \
            os.path.join(
                charm_dir(),
                'easy-rsa/easyrsa3/pki/private/{0}.key'.format(key_name))

    # Copy the key to the destination.
    copy2(source, destination)
    chown(destination, user, group)

    # Set the destination path for the server key on the unitdata.
    unitdata.kv().set('server-key-path', destination)
Example #11
    def disable_ha(self):
        spark_env = self.dist_config.path('spark_conf') / 'spark-env.sh'
        utils.re_edit_in_place(spark_env, {
            r'.*SPARK_DAEMON_JAVA_OPTS.*': '# SPARK_DAEMON_JAVA_OPTS',
        })
        unitdata.kv().set('zookeepers.available', False)
        unitdata.kv().flush(True)
Example #12
    def _unitdata_cmd(action, key, value):
        if action == 'get':
            return unitdata.kv().get(key)
        elif action == 'set':
            unitdata.kv().set(key, value)
            unitdata.kv().flush()
            return ''
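A hedged round-trip sketch for the helper above (the key name is made up):

_unitdata_cmd('set', 'charm.example.flag', 'on')   # persists and flushes
assert _unitdata_cmd('get', 'charm.example.flag') == 'on'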
Example #13
def leader_set(settings=None, **kw):
    """Change leadership settings, per charmhelpers.core.hookenv.leader_set.

    The leadership.set.{key} reactive state will be set while the
    leadership hook environment setting remains set.

    Changed leadership settings will set the leadership.changed.{key}
    and leadership.changed states. These states will remain set until
    the following hook.

    These state changes take effect immediately on the leader, and
    in future hooks run on non-leaders. In this way both leaders and
    non-leaders can share handlers, waiting on these states.
    """
    settings = settings or {}
    settings.update(kw)
    previous = unitdata.kv().getrange("leadership.settings.", strip=True)

    for key, value in settings.items():
        if value != previous.get(key):
            reactive.set_state("leadership.changed.{}".format(key))
            reactive.set_state("leadership.changed")
        reactive.helpers.toggle_state("leadership.set.{}".format(key), value is not None)
    hookenv.leader_set(settings)
    unitdata.kv().update(settings, prefix="leadership.settings.")
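A minimal sketch pairing leader_set() with handlers on both sides (the setting key and values are hypothetical):

from charms.reactive import when
from charmhelpers.core import hookenv

@when('leadership.is_leader')
def publish_shared_secret():
    leader_set({'shared-secret': 'example-value'})

@when('leadership.changed.shared-secret')
def consume_shared_secret():
    # Runs immediately on the leader and in a later hook on followers.
    hookenv.log('shared-secret is now {}'.format(
        hookenv.leader_get('shared-secret')))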
Example #14
def send_info(datanode):
    hadoop = get_hadoop_base()
    hdfs = HDFS(hadoop)
    local_hostname = hookenv.local_unit().replace('/', '-')
    hdfs_port = hadoop.dist_config.port('namenode')
    webhdfs_port = hadoop.dist_config.port('nn_webapp_http')

    utils.update_kv_hosts(datanode.hosts_map())
    utils.manage_etc_hosts()

    datanode.send_spec(hadoop.spec())
    datanode.send_namenodes([local_hostname])
    datanode.send_ports(hdfs_port, webhdfs_port)
    datanode.send_ssh_key(utils.get_ssh_key('hdfs'))
    datanode.send_hosts_map(utils.get_kv_hosts())

    slaves = datanode.nodes()
    if data_changed('namenode.slaves', slaves):
        unitdata.kv().set('namenode.slaves', slaves)
        hdfs.register_slaves(slaves)
        hdfs.refresh_slaves()

    hookenv.status_set('active', 'Ready ({count} DataNode{s})'.format(
        count=len(slaves),
        s='s' if len(slaves) > 1 else '',
    ))
    set_state('namenode.ready')
Example #15
def ca(source, destination, user=None, group=None):
    """
    Copy the Certificate Authority (CA) to the destination, creating parent
    directories if needed and assign owner if set. The tls layer installs the
    CA on all the peers in /usr/local/share/ca-certificates/.

    :param string source: The path to look for the certificate, if None the
    CA will be copied from the default location.
    :param string destination: The path to save the CA certificate.
    :param string user: The optional user name to own the CA certificate.
    :param string group: The optional group name to own the CA certificate.
    """
    _ensure_directory(destination, user, group)

    if not source:
        # When source not specified use the default CA path.
        source = \
            '/usr/local/share/ca-certificates/{0}.crt'.format(service_name())

    # Copy the ca certificate to the destination directory.
    copy2(source, destination)
    chown(destination, user, group)

    # Set the destination path for the ca certificate path on the unitdata.
    unitdata.kv().set('ca-cert-path', destination)
Example #16
    @classmethod
    def join(cls, scope):
        """
        Get or create a conversation for the given scope and active hook context.

        The current remote unit for the active hook context will be added to
        the conversation.

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
        """
        relation_name = hookenv.relation_type()
        relation_id = hookenv.relation_id()
        unit = hookenv.remote_unit()
        service = hookenv.remote_service_name()
        if scope is scopes.UNIT:
            scope = unit
            namespace = relation_id
        elif scope is scopes.SERVICE:
            scope = service
            namespace = relation_id
        else:
            namespace = relation_name
        key = cls._key(namespace, scope)
        data = unitdata.kv().get(key, {'namespace': namespace, 'scope': scope, 'units': []})
        conversation = cls.deserialize(data)
        conversation.units.add(unit)
        unitdata.kv().set(key, cls.serialize(conversation))
        return conversation
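A hedged sketch of joining a conversation from inside a relation hook (the scope choice and the stored field are illustrative only; Conversation is the class above):

from charms.reactive.relations import scopes
from charmhelpers.core import unitdata

conv = Conversation.join(scopes.SERVICE)
conv.set_local('joined', True)
unitdata.kv().flush()  # join() and set_local() rely on an explicit flush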
Example #17
def server_cert(source, destination, user=None, group=None):
    """
    Copy the server certificate to the destination, creating directories if
    needed and assign ownership if set.

    :param string source: The directory to look for the certificate, if None
    the certificate will be copied from unit data.
    :param string destination: The path to save the certificate.
    :param string user: The optional name of the user to own the certificate.
    :param string group: The optional name of the group to own certificate.
    """
    _ensure_directory(destination, user, group)

    if not source:
        # Must remove the path characters from the local unit name.
        key_name = local_unit().replace('/', '_')
        # The location of server certificate is easy-rsa/easyrsa3/pki/issued
        source = \
            os.path.join(
                charm_dir(),
                'easy-rsa/easyrsa3/pki/issued/{0}.crt'.format(key_name))

    if os.path.isfile(source):
        # Copy the server certificate to the destination.
        copy2(source, destination)
    else:
        # No source server certificate, get the value from unit data.
        server_cert_key = 'tls.server.certificate'
        # Save the certificate data to the destination directory.
        _save_unitdata(server_cert_key, destination)

    chown(destination, user, group)
    # Set the destination path for the server certificate on the unitdata.
    unitdata.kv().set('server-cert-path', destination)
Example #18
def clear_config_states():
    from charmhelpers.core import hookenv, unitdata
    from charms.reactive import remove_state
    config = hookenv.config()
    remove_state('config.changed')
    for opt in config.keys():
        remove_state('config.changed.{}'.format(opt))
    unitdata.kv().flush()
Example #19
def set_state(state, value=None):
    """
    Set the given state as active, optionally associating with a relation.
    """
    old_states = get_states()
    unitdata.kv().update({state: value}, prefix='reactive.states.')
    if state not in old_states:
        StateWatch.change(state)
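A small sketch of how set_state()/remove_state() above are typically paired with a config option (the state and option names are hypothetical):

from charmhelpers.core import hookenv

if hookenv.config('enable-metrics'):
    set_state('metrics.enabled')
else:
    remove_state('metrics.enabled')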
Example #20
    def invoke(self):
        """
        Call the external handler to be invoked.
        """
        # flush to ensure external process can see states as they currently
        # are, and write states (flush releases lock)
        unitdata.kv().flush()
        subprocess.check_call([self._filepath, '--invoke', self._test_output], env=os.environ)
Example #21
def set_cert(key, certificate):
    """Set the certificate on the key value store of the unit, and set
    the corresponding state for layers to consume."""
    # Set cert on the unitdata key value store so other layers can get it.
    unitdata.kv().set(key, certificate)
    # Set the final state for the other layers to know when they can
    # retrieve the server certificate.
    set_state("{0} available".format(key))
Example #22
    def action(self):
        """
        Execute the external handler, which should perform its own predicate
        and reinvocation checking.
        """
        # flush to ensure external process can see states as they currently
        # are, and write states (flush releases lock)
        unitdata.kv().flush()
        subprocess.check_call([self.filepath], env=os.environ)
Example #23
    def trigger_bigtop(self):
        '''
        Trigger the Bigtop puppet recipe that handles the Zeppelin service.
        '''
        bigtop = Bigtop()
        overrides = unitdata.kv().getrange('zeppelin.bigtop.overrides.',
                                           strip=True)

        # The zep deb depends on spark-core which unfortunately brings in
        # most of hadoop. Include appropriate roles here to ensure these
        # packages are configured in the same way as our other Bigtop
        # software deployed with puppet.
        bigtop.render_site_yaml(
            roles=[
                'spark-client',
                'spark-yarn-slave',
                'zeppelin-server',
            ],
            overrides=overrides,
        )

        # NB: during an upgrade, we configure the site.yaml, but do not
        # trigger puppet. The user must do that with the 'reinstall' action.
        if unitdata.kv().get('zeppelin.version.repo', False):
            hookenv.log("An upgrade is available and the site.yaml has been "
                        "configured. Run the 'reinstall' action to continue.",
                        level=hookenv.INFO)
        else:
            ####################################################################
            # BUG: BIGTOP-2742
            # Default zeppelin init script looks for the literal '$(hostname)'
            # string. Symlink it so it exists before the apt install from puppet
            # tries to start the service.
            import subprocess
            host = subprocess.check_output(['hostname']).decode('utf8').strip()
            zepp_pid = '/var/run/zeppelin/zeppelin-zeppelin-{}.pid'.format(host)
            utils.run_as('root', 'mkdir', '-p', '/var/run/zeppelin')
            utils.run_as('root', 'ln', '-sf',
                         zepp_pid,
                         '/var/run/zeppelin/zeppelin-zeppelin-$(hostname).pid')
            ####################################################################

            bigtop.trigger_puppet()
            self.wait_for_api(30)

            ####################################################################
            # BUG: BIGTOP-2742
            # Puppet apply will call systemctl daemon-reload, which removes the
            # symlink we just created. Now that the bits are on disk, update the
            # init script $(hostname) that caused this mess to begin with.
            zepp_init_script = '/etc/init.d/zeppelin'
            utils.re_edit_in_place(zepp_init_script, {
                r'^# pidfile.*': '# pidfile: {}'.format(zepp_pid),
            })
            utils.run_as('root', 'systemctl', 'daemon-reload')
            self.restart()
            self.wait_for_api(30)
Example #24
    def init_zkrest(self):
        # Zookeeper user needs to compile the rest contrib server.
        # So zookeeper needs to:
        # 1. Have a home dir for ant cache to exist
        # 2. Write to the /usr/lib/zookeeper
        chownr(self.dist_config.path('zookeeper'), 'zookeeper', 'zookeeper', chowntopdir=True)
        with chdir(self.dist_config.path('zookeeper')):
            utils.run_as('zookeeper', 'ant')
        unitdata.kv().set('rest.initialised', True)
Example #25
def update_active_status():
    datanode = DataNode()
    if datanode.is_ready():
        hookenv.status_set('active', 'Ready (%s DataNodes)' % len(datanode.filtered_data()))
        unitdata.kv().set('charm.active', True)
    elif datanode.connected_units():
        hookenv.status_set('waiting', 'Waiting for compute slaves to provide DataNodes')
    else:
        hookenv.status_set('blocked', 'Waiting for relation to compute slaves')
Example #26
    def format_namenode(self):
        if unitdata.kv().get('hdfs.namenode.formatted'):
            return
        self.stop_namenode()
        # Run without prompting; this will fail if the namenode has already
        # been formatted -- we do not want to reformat existing data!
        self._hdfs('namenode', '-format', '-noninteractive')
        unitdata.kv().set('hdfs.namenode.formatted', True)
        unitdata.kv().flush(True)
Example #27
def update_active_status():
    nodemanager = NodeManager()
    if nodemanager.is_ready():
        hookenv.status_set('active', 'Ready (%s NodeManagers)' % len(nodemanager.filtered_data()))
        unitdata.kv().set('charm.active', True)
    elif nodemanager.connected_units():
        hookenv.status_set('waiting', 'Waiting for compute slaves to provide NodeManagers')
    else:
        hookenv.status_set('blocked', 'Waiting for relation to compute slaves')
Example #28
    def any_changed():
        changed = False
        for filename in filenames:
            old_hash = unitdata.kv().get('reactive.when_file_changed.%s' % filename)
            new_hash = host.file_hash(filename, hash_type=kwargs.get('hash_type', 'md5'))
            if old_hash != new_hash:
                unitdata.kv().set('reactive.when_file_changed.%s' % filename, new_hash)
                changed = True  # mark as changed, but keep updating hashes
        return changed
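The same change-tracking idea works for arbitrary values, not just file hashes; a minimal sketch with a hypothetical key prefix:

from charmhelpers.core import unitdata

def value_changed(key, value):
    # Return True (and record the new value) only when it differs from
    # what was stored on the previous call.
    kv = unitdata.kv()
    old = kv.get('example.changed.%s' % key)
    if old != value:
        kv.set('example.changed.%s' % key, value)
        return True
    return False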
Example #29
def install():
    '''Install the easy-rsa software that is used by this layer.'''
    easyrsa_resource = None
    try:
        # Try to get the resource from Juju.
        easyrsa_resource = resource_get('easyrsa')
    except Exception as e:
        message = 'An error occurred fetching the easyrsa resource.'
        hookenv.log(message)
        hookenv.log(e)
        hookenv.status_set('blocked', message)
        return

    if not easyrsa_resource:
        hookenv.status_set('blocked', 'The easyrsa resource is missing.')
        return

    # Get the filesize in bytes.
    filesize = os.stat(easyrsa_resource).st_size
    # When the filesize is less than 10 KB we do not have a real file.
    if filesize < 10240:
        hookenv.status_set('blocked', 'The easyrsa resource is not complete.')
        return

    # Expand the archive in the charm directory creating an EasyRSA directory.
    untar = 'tar -xvzf {0} -C {1}'.format(easyrsa_resource, charm_directory)
    check_call(split(untar))

    version = get_version(easyrsa_resource)
    # Save the version in the key/value store of the charm.
    unitdata.kv().set('easyrsa-version', version)

    if os.path.islink(easyrsa_directory):
        check_call(split('rm -v {0}'.format(easyrsa_directory)))

    # Link the EasyRSA version directory to a common name.
    link = 'ln -v -s {0}/EasyRSA-{1} {2}'.format(charm_directory,
                                                 version,
                                                 easyrsa_directory)
    check_call(split(link))
    # The charm pki directory contains backup of pki for upgrades.
    charm_pki_directory = os.path.join(charm_directory, 'pki')
    if os.path.isdir(charm_pki_directory):
        new_pki_directory = os.path.join(easyrsa_directory, 'pki')
        # Only copy the directory if the new_pki_directory does not exist.
        if not os.path.isdir(new_pki_directory):
            # Copy the pki to this new directory.
            shutil.copytree(charm_pki_directory, new_pki_directory,
                            symlinks=True)
        # We are done with the old charm pki directory, so delete contents.
        shutil.rmtree(charm_pki_directory)
    else:
        # Create new pki.
        with chdir(easyrsa_directory):
            check_call(split('./easyrsa --batch init-pki 2>&1'))
    set_state('easyrsa.installed')
Example #30
    def install(self):
        jujuresources.install(self.resources['hue'],
                              destination=self.dist_config.path('hue'),
                              skip_top_level=True)

        self.dist_config.add_users()
        self.dist_config.add_dirs()
        self.dist_config.add_packages()
        chownr(self.dist_config.path('hue'), 'hue', 'hadoop')
        unitdata.kv().set('hue.installed', True)
Example #31
def set_meta_generation(gen):
    unitdata.kv().set(kvdata.KEY_META_GENERATION, gen)
Example #32
    def __call__(self):
        try:
            import hvac
        except ImportError:
            # BUG: #1862085 - if the relation is made to vault, but the
            # 'encrypt' option is not made, then the charm errors with an
            # import warning.  This catches that, logs a warning, and returns
            # with an empty context.
            hookenv.log(
                "VaultKVContext: trying to use the hvac python module "
                "but it's not available. Is the secrets-storage relation "
                "made, but the encrypt option not set?",
                level=hookenv.WARNING)
            # return an empty context on hvac import error
            return {}
        ctxt = {}
        # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323
        db = unitdata.kv()
        # currently known-good secret-id
        secret_id = db.get('secret-id')

        for relation_id in hookenv.relation_ids(self.interfaces[0]):
            for unit in hookenv.related_units(relation_id):
                data = hookenv.relation_get(unit=unit, rid=relation_id)
                vault_url = data.get('vault_url')
                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
                token = data.get('{}_token'.format(hookenv.local_unit()))

                if all([vault_url, role_id, token]):
                    token = json.loads(token)
                    vault_url = json.loads(vault_url)

                    # Tokens may change when secret_ids are being
                    # reissued - if so use the token to get a new secret_id
                    token_success = False
                    try:
                        secret_id = retrieve_secret_id(url=vault_url,
                                                       token=token)
                        token_success = True
                    except hvac.exceptions.InvalidRequest:
                        # Try next
                        pass

                    if token_success:
                        db.set('secret-id', secret_id)
                        db.flush()

                        ctxt['vault_url'] = vault_url
                        ctxt['role_id'] = json.loads(role_id)
                        ctxt['secret_id'] = secret_id
                        ctxt['secret_backend'] = self.secret_backend
                        vault_ca = data.get('vault_ca')
                        if vault_ca:
                            ctxt['vault_ca'] = json.loads(vault_ca)

                        self.complete = True
                        break
                    else:
                        if secret_id:
                            ctxt['vault_url'] = vault_url
                            ctxt['role_id'] = json.loads(role_id)
                            ctxt['secret_id'] = secret_id
                            ctxt['secret_backend'] = self.secret_backend
                            vault_ca = data.get('vault_ca')
                            if vault_ca:
                                ctxt['vault_ca'] = json.loads(vault_ca)

            if self.complete:
                break

        if ctxt:
            self.complete = True

        return ctxt
Example #33
def _assess_status():
    """Assess status of relations and services for local unit"""
    if is_flag_set('snap.channel.invalid'):
        status_set(
            'blocked', 'Invalid snap channel '
            'configured: {}'.format(config('channel')))
        return
    if is_flag_set('config.dns_vip.invalid'):
        status_set('blocked', 'vip and dns-ha-access-record configured')
        return
    if is_flag_set('config.lb_vip.invalid'):
        status_set('blocked', 'lb-provider and vip are mutually exclusive')
        return
    if is_flag_set('config.lb_dns.invalid'):
        status_set(
            'blocked', 'lb-provider and dns-ha-access-record are '
            'mutually exclusive')
        return

    if unitdata.kv().get('charm.vault.series-upgrading'):
        status_set(
            "blocked", "Ready for do-release-upgrade and reboot. "
            "Set complete when finished.")
        return

    if is_flag_set('failed.to.start'):
        status_set("blocked",
                   "Vault failed to start; check journalctl -u vault")
        return

    _missing_interfaces = []
    _incomplete_interfaces = []

    _assess_interface_groups(REQUIRED_INTERFACES,
                             optional=False,
                             missing_interfaces=_missing_interfaces,
                             incomplete_interfaces=_incomplete_interfaces)

    if _missing_interfaces or _incomplete_interfaces:
        state = 'blocked' if _missing_interfaces else 'waiting'
        status_set(state,
                   ', '.join(_missing_interfaces + _incomplete_interfaces))
        return

    health = None
    if service_running('vault'):
        try:
            health = vault.get_vault_health()
        except Exception:
            log(traceback.format_exc(), level=ERROR)
            status_set('blocked', 'Vault health check failed')
            return
    else:
        status_set('blocked', 'Vault service not running')
        return

    if health.get('version'):
        application_version_set(health.get('version'))
    else:
        application_version_set('Unknown')
        status_set('blocked', 'Unknown vault version')
        return

    if not health['initialized']:
        status_set('blocked', 'Vault needs to be initialized')
        return

    if health['sealed']:
        status_set('blocked', 'Unit is sealed')
        return

    if not leader_get(vault.CHARM_ACCESS_ROLE_ID):
        status_set(
            'blocked',
            'Vault charm not yet authorized: run authorize-charm action.')
        return

    if not client_approle_authorized():
        status_set('blocked', 'Vault cannot authorize approle')
        return

    lb_provider = endpoint_from_name('lb-provider')
    is_leader = is_flag_set('leadership.is_leader')
    if is_leader and lb_provider and lb_provider.is_available:
        if not lb_provider.has_response:
            status_set('waiting', 'Waiting for load balancer')
            return
        response = lb_provider.get_response('vault')
        if response.error:
            status_set(
                'blocked', 'Load balancer failed: '
                '{}'.format(response.error_message or response.error_fields))
            return

    is_leader = is_flag_set('leadership.is_leader')
    has_ca = is_flag_set('charm.vault.ca.ready')
    has_cert_reqs = is_flag_set('certificates.certs.requested')
    if is_leader and has_cert_reqs and not has_ca:
        status_set('blocked', 'Missing CA cert')
        return

    has_certs_relation = is_flag_set('certificates.available')
    if is_leader and has_certs_relation and not has_ca:
        status_set('blocked', 'Missing CA cert')
        return

    _assess_interface_groups(OPTIONAL_INTERFACES,
                             optional=True,
                             missing_interfaces=_missing_interfaces,
                             incomplete_interfaces=_incomplete_interfaces)

    if _missing_interfaces or _incomplete_interfaces:
        state = 'blocked' if _missing_interfaces else 'waiting'
        status_set(state,
                   ', '.join(_missing_interfaces + _incomplete_interfaces))
        return

    mlock_disabled = is_container() or config('disable-mlock')

    vault_installed_version = snap.get_installed_version('vault')
    vault_running_version = health.get('version')
    if vault_installed_version != vault_running_version:
        status_set(
            'active',
            'New version of vault installed, manual intervention required '
            'to restart the service.')
        return

    if is_flag_set('etcd.tls.available'):
        client = vault.get_local_client()
        if not client.ha_status['ha_enabled']:
            status_set(
                'active',
                'Vault running as non-HA, manual intervention required '
                'to restart the service.')
            return

    status_set(
        'active', 'Unit is ready '
        '(active: {}, mlock: {})'.format(
            str(not health['standby']).lower(),
            'disabled' if mlock_disabled else 'enabled'))
Example #34
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=c0111,c0103,c0301,c0412,e0401
import os
import subprocess as sp
import json
from charms.reactive import when, when_not, set_state
from charmhelpers.core.templating import render
from charmhelpers.core import unitdata
from charmhelpers.core.hookenv import status_set, service_name

unitd = unitdata.kv()
FLOW_PATH = '/opt/dataflow'

@when('dataflow.available')
@when_not('activemq_dataflow.installed')
def install_activemq_dataflow(dataflow):
    set_state('activemq_dataflow.connected')
    unitd.set('nodes', ['node-red-node-mongodb', 'node-red-node-stomp'])
    status_set('blocked', 'Waiting for a relation with ActiveMQ and MongoDB')

################################################################################
# First Relation with Db then ActiveMQ
################################################################################
@when('db.available', 'activemq_dataflow.connected')
@when_not('activemq_dataflow.dbconnected')
def connect_to_db(db):
Example #35
    def get_local(self, key, default=None):
        """
        Retrieve some data previously set via :meth:`set_local` for this conversation.
        """
        key = '%s.%s.%s' % (self.key, 'local-data', key)
        return unitdata.kv().get(key, default)
Example #36
def configure_local_ephemeral_storage():
    """Configure local block device for use as ephemeral instance storage"""
    # Preflight check vault relation if encryption is enabled
    vault_kv = vaultlocker.VaultKVContext(
        secret_backend=vaultlocker.VAULTLOCKER_BACKEND)
    context = vault_kv()
    encrypt = config('encrypt')
    if encrypt and not vault_kv.complete:
        log("Encryption requested but vault relation not complete",
            level=DEBUG)
        return
    elif encrypt and vault_kv.complete:
        # NOTE: only write vaultlocker configuration once relation is complete
        #       otherwise we run the chance of an empty configuration file
        #       being installed on a machine with other vaultlocker based
        #       services
        vaultlocker.write_vaultlocker_conf(context, priority=80)

    db = kv()
    storage_configured = db.get('storage-configured', False)
    if storage_configured:
        log("Ephemeral storage already configured, skipping", level=DEBUG)
        return

    dev = determine_block_device()

    if not dev:
        log('No block device configuration found, skipping', level=DEBUG)
        return

    if not is_block_device(dev):
        log("Device '{}' is not a block device, "
            "unable to configure storage".format(dev),
            level=DEBUG)
        return

    # NOTE: this deals with a dm-crypt'ed block device already in
    #       use
    if is_device_mounted(dev):
        log("Device '{}' is already mounted, "
            "unable to configure storage".format(dev),
            level=DEBUG)
        return

    options = None
    if encrypt:
        dev_uuid = str(uuid.uuid4())
        check_call(['vaultlocker', 'encrypt', '--uuid', dev_uuid, dev])
        dev = '/dev/mapper/crypt-{}'.format(dev_uuid)
        options = ','.join([
            "defaults",
            "nofail",
            ("x-systemd.requires="
             "vaultlocker-decrypt@{uuid}.service".format(uuid=dev_uuid)),
            "comment=vaultlocker",
        ])

    # If not cleaned and in use, mkfs should fail.
    mkfs_xfs(dev, force=True)

    mountpoint = '/var/lib/nova/instances'
    filesystem = "xfs"
    mount(dev, mountpoint, filesystem=filesystem)
    fstab_add(dev, mountpoint, filesystem, options=options)

    check_call(['chown', '-R', 'nova:nova', mountpoint])
    check_call(['chmod', '-R', '0755', mountpoint])

    # NOTE: record preparation of device - this ensures that ephemeral
    #       storage is never reconfigured by mistake, losing instance disks
    db.set('storage-configured', True)
    db.flush()
Example #37
    def stop(self):
        flink_appID = unitdata.kv().get('flink.ID')
        if flink_appID:
            utils.run_as('flink', 'yarn', 'application', '-kill', flink_appID)
Example #38
    def _save(self):
        if not self:
            unitdata.kv().unset(self._cache_key)
        else:
            unitdata.kv().set(self._cache_key,
                              [item._serialize() for item in self])
Example #39
def prepare_disks_and_activate():
    if use_vaultlocker():
        # NOTE: vault/vaultlocker preflight check
        vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND)
        context = vault_kv()
        if not vault_kv.complete:
            log('Deferring OSD preparation as vault not ready', level=DEBUG)
            return
        else:
            log('Vault ready, writing vaultlocker configuration', level=DEBUG)
            vaultlocker.write_vaultlocker_conf(context)

    osd_journal = get_journal_devices()
    if not osd_journal.isdisjoint(set(get_devices())):
        raise ValueError('`osd-journal` and `osd-devices` options must not '
                         'overlap.')
    log("got journal devs: {}".format(osd_journal), level=DEBUG)

    # pre-flight check of eligible device pristinity
    devices = get_devices()

    # if a device has been previously touched we need to consider it as
    # non-pristine. If it needs to be re-processed it has to be zapped
    # via the respective action which also clears the unitdata entry.
    db = kv()
    touched_devices = db.get('osd-devices', [])
    devices = [dev for dev in devices if dev not in touched_devices]
    log('Skipping osd devices previously processed by this unit: {}'.format(
        touched_devices))
    # filter osd-devices that are file system paths
    devices = [dev for dev in devices if dev.startswith('/dev')]
    # filter osd-devices that do not exist on this unit
    devices = [dev for dev in devices if os.path.exists(dev)]
    # filter osd-devices that are already mounted
    devices = [dev for dev in devices if not is_device_mounted(dev)]
    # filter osd-devices that are active bluestore devices
    devices = [
        dev for dev in devices if not ceph.is_active_bluestore_device(dev)
    ]

    log('Checking for pristine devices: "{}"'.format(devices), level=DEBUG)
    if not all(ceph.is_pristine_disk(dev) for dev in devices):
        status_set(
            'blocked', 'Non-pristine devices detected, consult '
            '`list-disks`, `zap-disk` and `blacklist-*` actions.')
        return

    if is_osd_bootstrap_ready():
        log('ceph bootstrapped, rescanning disks')
        emit_cephconf()
        bluestore = use_bluestore()
        ceph.udevadm_settle()
        for dev in get_devices():
            ceph.osdize(dev, config('osd-format'), osd_journal,
                        config('ignore-device-errors'), config('osd-encrypt'),
                        bluestore, config('osd-encrypt-keymanager'))
            # Make it fast!
            if config('autotune'):
                log(
                    'The autotune config is deprecated and planned '
                    'for removal in the next release.',
                    level=WARNING)
                ceph.tune_dev(dev)
        ceph.start_osds(get_devices())

    # Notify MON cluster as to how many OSD's this unit bootstrapped
    # into the cluster
    for r_id in relation_ids('mon'):
        relation_set(relation_id=r_id,
                     relation_settings={
                         'bootstrapped-osds':
                         len(db.get('osd-devices', [])),
                         'ceph_release':
                         ceph.resolve_ceph_version(
                             hookenv.config('source') or 'distro')
                     })
Example #40
def amqp_changed(relation_id=None,
                 remote_unit=None,
                 check_deferred_restarts=True):
    """Update amqp relations.

    :param relation_id: Relation id to update
    :type relation_id: str
    :param remote_unit: Remote unit on relation_id to update
    :type remote_unit: str
    :param check_deferred_restarts: Whether to check if restarts are
                                    permitted before running hook.
    :type check_deferred_restarts: bool
    """
    allowed, reason = is_hook_allowed(
        'amqp-relation-changed',
        check_deferred_restarts=check_deferred_restarts)
    if not allowed:
        log(reason, "WARN")
        return
    singleset = set(['username', 'vhost'])
    host_addr = ch_ip.get_relation_ip(
        rabbit_net_utils.AMQP_INTERFACE,
        cidr_network=config(rabbit_net_utils.AMQP_OVERRIDE_CONFIG))

    sent_update = False
    if rabbit.leader_node_is_ready():
        relation_settings = {
            'hostname': host_addr,
            'private-address': host_addr
        }
        # NOTE: active/active case
        if config('prefer-ipv6'):
            relation_settings['private-address'] = host_addr

        current = relation_get(rid=relation_id, unit=remote_unit)
        if singleset.issubset(current):
            if not all([current.get('username'), current.get('vhost')]):
                log('Relation not ready.', DEBUG)
                return

            # Provide credentials to relations. If password is already
            # available on peer relation then use it instead of reconfiguring.
            username = current['username']
            vhost = current['vhost']
            admin = current.get('admin', False)
            ttlname = current.get('ttlname')
            ttlreg = current.get('ttlreg')
            ttl = current.get('ttl')
            amqp_rid = relation_id or get_relation_id()
            password = configure_amqp(username,
                                      vhost,
                                      amqp_rid,
                                      admin=admin,
                                      ttlname=ttlname,
                                      ttlreg=ttlreg,
                                      ttl=ttl)
            relation_settings['password'] = password
        else:
            # NOTE(hopem): we should look at removing this code since i don't
            #              think it's ever used anymore and stems from the days
            #              when we needed to ensure consistency between
            #              peerstorage (replaced by leader get/set) and amqp
            #              relations.
            queues = {}
            for k, v in current.items():
                amqp_rid = k.split('_')[0]
                x = '_'.join(k.split('_')[1:])
                if amqp_rid not in queues:
                    queues[amqp_rid] = {}

                queues[amqp_rid][x] = v

            for amqp_rid in queues:
                if singleset.issubset(queues[amqp_rid]):
                    username = queues[amqp_rid]['username']
                    vhost = queues[amqp_rid]['vhost']
                    # 'admin' is not otherwise defined on this code path;
                    # default to False, mirroring the primary branch above.
                    admin = queues[amqp_rid].get('admin', False)
                    ttlname = queues[amqp_rid].get('ttlname')
                    ttlreg = queues[amqp_rid].get('ttlreg')
                    ttl = queues[amqp_rid].get('ttl')
                    password = configure_amqp(username,
                                              vhost,
                                              amqp_rid,
                                              admin=admin,
                                              ttlname=ttlname,
                                              ttlreg=ttlreg,
                                              ttl=ttl)
                    key = '_'.join([amqp_rid, 'password'])
                    relation_settings[key] = password

        ssl_utils.configure_client_ssl(relation_settings)

        if is_clustered():
            relation_settings['clustered'] = 'true'
            # NOTE(dosaboy): this stanza can be removed once we fully remove
            #                deprecated HA support.
            if is_relation_made('ha'):
                # active/passive settings
                relation_settings['vip'] = config('vip')
                # or ha-vip-only to support active/active, but
                # accessed via a VIP for older clients.
                if config('ha-vip-only') is True:
                    relation_settings['ha-vip-only'] = 'true'

        # set if need HA queues or not
        if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
            relation_settings['ha_queues'] = True

        log(
            "Updating relation {} keys {}".format(
                relation_id or get_relation_id(),
                ','.join(relation_settings.keys())), DEBUG)
        peer_store_and_set(relation_id=relation_id,
                           relation_settings=relation_settings)
        sent_update = True
    elif not is_leader() and rabbit.client_node_is_ready():
        if not rabbit.clustered():
            log("This node is not clustered yet, defer sending data to client",
                level=DEBUG)
            return
        log("Propagating peer settings to all amqp relations", DEBUG)

        # NOTE(jamespage) clear relation to deal with data being
        #                 removed from peer storage.
        relation_clear(relation_id)

        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        for rel_id in relation_ids('amqp'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id)
            if 'password' in peerdb_settings:
                peerdb_settings['hostname'] = host_addr
                peerdb_settings['private-address'] = host_addr
                relation_set(relation_id=rel_id, **peerdb_settings)
                sent_update = True
    kvstore = kv()
    update_done = kvstore.get(INITIAL_CLIENT_UPDATE_KEY, False)
    if sent_update and not update_done:
        kvstore.set(key=INITIAL_CLIENT_UPDATE_KEY, value=True)
        kvstore.flush()
Example #41
def configure_amqp(username,
                   vhost,
                   relation_id,
                   admin=False,
                   ttlname=None,
                   ttlreg=None,
                   ttl=None):
    """Configure rabbitmq server.

    This function creates user/password, vhost and sets user permissions. It
    also enables mirroring queues if requested.

    Calls to rabbitmqctl are costly and as such we aim to limit them by only
    doing them if we detect that a setting needs creating or updating. To
    achieve this we track what we set by storing key/value pairs associated
    with a particular relation id in a local database.

    Since this function is only supposed to be called by the cluster leader,
    the database is expected to be invalidated if it exists and we are no
    longer leader so as to ensure that a leader switch results in a
    rabbitmq configuration consistent with the current leader's view.

    :param username: client username.
    :param vhost: vhost name.
    :param relation_id: optional relation id used to identify the context of
                        this operation. This should always be provided
                        so that we can track what has been set.
    :param admin: boolean value defining whether the new user is admin.
    :param ttlname: the name of the TTL policy
    :param ttlreg: the regular expression for the TTL policy
    :param ttl: the value of the TTL
    :returns: user password
    """
    log(
        "Configuring rabbitmq for user '{}' vhost '{}' (rid={})".format(
            username, vhost, relation_id), DEBUG)

    if not relation_id:
        raise Exception("Invalid relation id '{}' provided to "
                        "{}()".format(relation_id, configure_amqp.__name__))

    # get and update service password
    password = rabbit.get_rabbit_password(username)

    expected = {
        'username': username,
        'vhost': vhost,
        'ttl': ttl,
        'mirroring-queues': config('mirroring-queues')
    }
    kvstore = kv()
    tracker = kvstore.get('amqp_config_tracker') or {}
    val = tracker.get(relation_id)
    if val == expected and not val.get('stale'):
        log(
            "Rabbit already configured for relation "
            "'{}'".format(relation_id), DEBUG)
        return password
    else:
        tracker[relation_id] = expected

    # update vhost
    rabbit.create_vhost(vhost)
    # NOTE(jamespage): Workaround until we have a good way
    #                  of generally disabling notifications
    #                  based on which services are deployed.
    if vhost == 'openstack':
        rabbit.configure_notification_ttl(vhost, config('notification-ttl'))
        rabbit.configure_ttl(vhost, ttlname, ttlreg, ttl)

    if admin:
        rabbit.create_user(username, password, ['administrator'])
    else:
        rabbit.create_user(username, password)
    rabbit.grant_permissions(username, vhost)

    # NOTE(freyes): after rabbitmq-server 3.0 the method to define HA in the
    # queues is different
    # http://www.rabbitmq.com/blog/2012/11/19/breaking-things-with-rabbitmq-3-0
    if config('mirroring-queues'):
        rabbit.set_ha_mode(vhost, 'all')

    kvstore.set(key='amqp_config_tracker', value=tracker)
    kvstore.flush()

    return password
Example #42
    def configure(self, available_hosts, zk_units, peers):
        """
        This is the core logic of setting up spark.

        Two flags are needed:

          * Namenode exists aka HDFS is ready
          * Resource manager exists aka YARN is ready

        Both flags are inferred from the available hosts.

        :param dict available_hosts: Hosts that Spark should know about.
        :param list zk_units: Zookeeper unit dicts (each with 'host' and
            'port'), used to build the HA connection string.
        :param list peers: Spark peer units; stored in unitdata for later use.
        """
        # Bootstrap spark
        if not unitdata.kv().get('spark.bootstrapped', False):
            self.setup()
            unitdata.kv().set('spark.bootstrapped', True)

        # Set KV based on connected applications
        unitdata.kv().set('zookeeper.units', zk_units)
        unitdata.kv().set('sparkpeer.units', peers)
        unitdata.kv().flush(True)

        # Get our config ready
        dc = self.dist_config
        events_log_dir = 'file://{}'.format(dc.path('spark_events'))
        mode = hookenv.config()['spark_execution_mode']
        master_ip = utils.resolve_private_address(available_hosts['spark-master'])
        master_url = self.get_master_url(master_ip)

        # Setup hosts dict
        hosts = {
            'spark': master_ip,
        }
        if 'namenode' in available_hosts:
            hosts['namenode'] = available_hosts['namenode']
            events_log_dir = self.setup_hdfs_logs()

        if 'resourcemanager' in available_hosts:
            hosts['resourcemanager'] = available_hosts['resourcemanager']

        # Setup roles dict. We always include the history server and client.
        # Determine other roles based on our execution mode.
        roles = ['spark-history-server', 'spark-client']
        if mode == 'standalone':
            roles.append('spark-master')
            roles.append('spark-worker')
        elif mode.startswith('yarn'):
            roles.append('spark-on-yarn')
            roles.append('spark-yarn-slave')

        # Setup overrides dict
        override = {
            'spark::common::master_url': master_url,
            'spark::common::event_log_dir': events_log_dir,
            'spark::common::history_log_dir': events_log_dir,
        }
        if zk_units:
            zks = []
            for unit in zk_units:
                ip = utils.resolve_private_address(unit['host'])
                zks.append("%s:%s" % (ip, unit['port']))

            zk_connect = ",".join(zks)
            override['spark::common::zookeeper_connection_string'] = zk_connect
        else:
            override['spark::common::zookeeper_connection_string'] = None

        # Create our site.yaml and trigger puppet
        bigtop = Bigtop()
        bigtop.render_site_yaml(hosts, roles, override)
        bigtop.trigger_puppet()

        # Do this after our puppet bits in case puppet overrides needed perms
        if 'namenode' not in available_hosts:
            # Local event dir (not in HDFS) needs to be 777 so non-spark
            # users can write job history there. It needs to be g+s so
            # all entries will be readable by spark (in the spark group).
            # It needs to be +t so users cannot remove files they don't own.
            dc.path('spark_events').chmod(0o3777)

        self.patch_worker_master_url(master_ip, master_url)

        # handle tuning options that may be set as percentages
        driver_mem = '1g'
        req_driver_mem = hookenv.config()['driver_memory']
        executor_mem = '1g'
        req_executor_mem = hookenv.config()['executor_memory']
        if req_driver_mem.endswith('%'):
            if mode == 'standalone' or mode.startswith('local'):
                mem_mb = host.get_total_ram() / 1024 / 1024
                req_percentage = float(req_driver_mem.strip('%')) / 100
                driver_mem = str(int(mem_mb * req_percentage)) + 'm'
            else:
                hookenv.log("driver_memory percentage in non-local mode. Using 1g default.",
                            level=None)
        else:
            driver_mem = req_driver_mem

        if req_executor_mem.endswith('%'):
            if mode == 'standalone' or mode.startswith('local'):
                mem_mb = host.get_total_ram() / 1024 / 1024
                req_percentage = float(req_executor_mem.strip('%')) / 100
                executor_mem = str(int(mem_mb * req_percentage)) + 'm'
            else:
                hookenv.log("executor_memory percentage in non-local mode. Using 1g default.",
                            level=None)
        else:
            executor_mem = req_executor_mem

        spark_env = '/etc/spark/conf/spark-env.sh'
        utils.re_edit_in_place(spark_env, {
            r'.*SPARK_DRIVER_MEMORY.*': 'export SPARK_DRIVER_MEMORY={}'.format(driver_mem),
            r'.*SPARK_EXECUTOR_MEMORY.*': 'export SPARK_EXECUTOR_MEMORY={}'.format(executor_mem),
        }, append_non_matches=True)

        # Install SB (subsequent calls will reconfigure existing install)
        # SparkBench looks for the spark master in /etc/environment
        with utils.environment_edit_in_place('/etc/environment') as env:
            env['MASTER'] = master_url
        self.install_benchmark()
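The percentage handling above reduces to a small conversion: a value ending in '%' becomes an absolute megabyte figure derived from total RAM (the repeated division by 1024 implies get_total_ram() returns bytes), and any other value is passed through unchanged. A minimal sketch of that conversion, with a hypothetical helper name and the non-local fallback to '1g' omitted:

def mem_setting(requested, total_ram_bytes):
    # '50%' with 8 GiB of RAM -> '4096m'; non-percentage values pass through unchanged.
    if requested.endswith('%'):
        mem_mb = total_ram_bytes / 1024 / 1024
        return str(int(mem_mb * float(requested.strip('%')) / 100)) + 'm'
    return requested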
Example No. 43
@classmethod
def load(cls, cache_key, deserializer, key_attr):
    """
    Load the persisted cache and return a new instance of this class.
    """
    items = unitdata.kv().get(cache_key) or []
    return cls(cache_key, [deserializer(item) for item in items], key_attr)
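The matching write path is not shown here; a minimal sketch of what persisting the cache back to unitdata could look like, assuming instance attribute names and a serializer symmetric to `deserializer` (both are assumptions, not the original implementation):

def save(self, serializer):
    # Persist the cached items under the same kv key used by load() above.
    # 'self._cache_key' and 'self._items' are assumed attribute names.
    unitdata.kv().set(self._cache_key, [serializer(item) for item in self._items])
    unitdata.kv().flush(True)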
def unset_meta_generation():
    unitdata.kv().unset(kvdata.KEY_META_GENERATION)
Example No. 45
def clear(self):
    """
    Clear this request's cached data.
    """
    unitdata.kv().unset(self._hash_key)
def get_meta_generation():
    return unitdata.kv().get(kvdata.KEY_META_GENERATION)
Example No. 47
def is_installed(self):
    return unitdata.kv().get('flink.installed')
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charms.reactive import Endpoint, toggle_flag, set_flag, data_changed

from charmhelpers.core import hookenv, unitdata

DB = unitdata.kv()


class KubeControlProvider(Endpoint):
    """
    Implements the kubernetes-master side of the kube-control interface.
    """
    def manage_flags(self):
        toggle_flag(self.expand_name("{endpoint_name}.connected"),
                    self.is_joined)
        toggle_flag(
            self.expand_name("{endpoint_name}.gpu.available"),
            self.is_joined and self._get_gpu(),
        )
        requests_data_id = self.expand_name("{endpoint_name}.requests")
        requests = self.auth_user()
def _load_creds():
    return kv().get('charm.vsphere.full-creds')
Example No. 50
def configure_registry():
    '''Recreate the docker registry config.yml.'''
    charm_config = hookenv.config()
    registry_config = {'version': '0.1'}
    registry_config_file = '/etc/docker/registry/config.yml'

    # Some things need to be volume mounted in the container. Keep track of
    # those (recreate each time we configure). Regardless of the src location,
    # we explicitly mount config in the container under /etc/docker/registry.
    kv = unitdata.kv()
    kv.unset('docker_volumes')
    docker_volumes = {registry_config_file: '/etc/docker/registry/config.yml'}

    # auth (https://docs.docker.com/registry/configuration/#auth)
    auth = {}
    auth_basic = _get_auth_basic()
    if auth_basic:
        auth['htpasswd'] = auth_basic
        docker_volumes[auth_basic['path']] = '/etc/docker/registry/htpasswd'
    auth_token = _get_auth_token()
    if auth_token:
        auth['token'] = auth_token
        docker_volumes[auth_token['rootcertbundle']] = '/etc/docker/registry/auth_token.pem'
    registry_config['auth'] = auth

    # http (https://docs.docker.com/registry/configuration/#http)
    port = charm_config.get('registry-port')
    http = {'addr': '0.0.0.0:{}'.format(port),
            'headers': {'X-Content-Type-Options': ['nosniff']},
            'relativeurls': True}
    if charm_config.get('http-host'):
        http['host'] = charm_config['http-host']
    http_secret = leader_get('http-secret')
    if http_secret:
        http['secret'] = http_secret

    # Only does anything if tls-*-blob set.
    _write_tls_blobs_to_files()

    tls_ca = charm_config.get('tls-ca-path', '')
    tls_cert = charm_config.get('tls-cert-path', '')
    tls_key = charm_config.get('tls-key-path', '')
    if os.path.isfile(tls_cert) and os.path.isfile(tls_key):
        http['tls'] = {
            'certificate': tls_cert,
            'key': tls_key,
        }
        docker_volumes[tls_cert] = '/etc/docker/registry/registry.crt'
        docker_volumes[tls_key] = '/etc/docker/registry/registry.key'

        if os.path.isfile(tls_ca):
            http['tls']['clientcas'] = [tls_ca]
            docker_volumes[tls_ca] = '/etc/docker/registry/ca.crt'
    registry_config['http'] = http

    # log (https://docs.docker.com/registry/configuration/#log)
    registry_config['log'] = {
        'level': charm_config['log-level'],
        'formatter': 'json',
        'fields': {
            'service': 'registry',
        }
    }

    # health (https://docs.docker.com/registry/configuration/#health)
    registry_config['health'] = {
        'storagedriver': {
            'enabled': True,
            'interval': '10s',
            'threshold': 3,
        }
    }

    # storage (https://docs.docker.com/registry/configuration/#storage)
    # we must have 1 (and only 1) storage driver
    storage = {}
    if charm_config.get('storage-swift-authurl'):
        storage['swift'] = {
            'authurl': charm_config.get('storage-swift-authurl', ''),
            'username': charm_config.get('storage-swift-username', ''),
            'password': charm_config.get('storage-swift-password', ''),
            'region': charm_config.get('storage-swift-region', ''),
            'container': charm_config.get('storage-swift-container', ''),
            'tenant': charm_config.get('storage-swift-tenant', ''),
        }

        # Openstack Domain settings (https://github.com/docker/docker.github.io/blob/master/registry/storage-drivers/swift.md)
        val = charm_config.get('storage-swift-domain', '')
        if val != '':
            storage['swift'].update({'domain': val})

        storage['redirect'] = {'disable': True}
    else:
        # If we're not swift, we're local.
        container_registry_path = '/var/lib/registry'
        storage['filesystem'] = {'rootdirectory': container_registry_path}
        storage['cache'] = {'blobdescriptor': 'inmemory'}

        # Local storage is mounted from the host so images persist across
        # registry container restarts.
        host_registry_path = '/srv/registry'
        os.makedirs(host_registry_path, exist_ok=True)
        docker_volumes[host_registry_path] = container_registry_path
    if charm_config.get('storage-delete'):
        storage['delete'] = {'enabled': True}
    if charm_config.get('storage-read-only'):
        storage['maintenance'] = {'readonly': {'enabled': True}}
    registry_config['storage'] = storage

    os.makedirs(os.path.dirname(registry_config_file), exist_ok=True)
    host.write_file(
        registry_config_file,
        yaml.safe_dump(registry_config),
        perms=0o600,
    )

    # NB: all hooks will flush, but do an explicit one now in case we call
    # something that needs this data before our hook ends.
    kv.set('docker_volumes', docker_volumes)
    kv.flush(True)

    # Configure the system so our local 'docker' commands can interact
    # with the registry.
    _configure_local_client()
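The 'docker_volumes' map saved above is presumably consumed elsewhere when the registry container is (re)started; a minimal, hypothetical sketch of such a consumer (not part of the code shown here):

def volume_args():
    # Hypothetical consumer: turn each host->container path pair stored above
    # into a '-v' argument suitable for 'docker run'.
    args = []
    for host_path, container_path in unitdata.kv().get('docker_volumes', {}).items():
        args.extend(['-v', '{}:{}'.format(host_path, container_path)])
    return args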
Example No. 51
import shutil

from subprocess import check_call

from charmhelpers.core import hookenv
from charmhelpers.core import unitdata

from charms import reactive
from charms.reactive import hook
from charms.reactive import when, when_not, when_any

db = unitdata.kv()
config = hookenv.config()


@hook('config-changed')
def config_changed():
    restart_if_need()


@hook('mysql-relation-changed')
def mysql_changed():
    restart_if_need()


def restart_if_need():
    if reactive.is_state('restcomm.started'):
        reactive.set_state('restcomm.changed')


@hook('api-relation-joined')
Example No. 52
def get_blacklist():
    """Get blacklist stored in the local kv() store"""
    db = unitdata.kv()
    return db.get('osd-blacklist', [])
Example No. 53
#python3 pylint:disable=c0111
import json
import socket

from charmhelpers.core import unitdata

from charms.reactive import hook
from charms.reactive import RelationBase
from charms.reactive import scopes

RANGE = 29000

KV = unitdata.kv()


class OpenedPortsRequires(RelationBase):
    scope = scopes.UNIT

    @hook('{requires:opened-ports}-relation-{joined,changed}')
    def changed(self):
        conv = self.conversation()
        if conv.get_remote('opened-ports'):
            # this unit's conversation has a port, so
            # it is part of the set of available units
            opened_ports = json.loads(conv.get_remote('opened-ports'))
            port_forwards = conv.get_local('port-forwards', [])
            # Get this unit's outbound IP address (the local address used to
            # reach external hosts, not necessarily a public IP)
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock.connect(("google.com", 80))
            public_address = sock.getsockname()[0]
            sock.close()
Example No. 54
    def configure(self, available_hosts, zk_units, peers, extra_libs):
        """
        This is the core logic of setting up spark.

        :param dict available_hosts: Hosts that Spark should know about.
        :param list zk_units: List of Zookeeper dicts with host/port info.
        :param list peers: List of Spark peer tuples (unit name, IP).
        :param list extra_libs: List of extra lib paths for driver/executors.
        """
        # Set KV based on connected applications
        unitdata.kv().set('zookeeper.units', zk_units)
        unitdata.kv().set('sparkpeer.units', peers)
        unitdata.kv().flush(True)

        # Get our config ready
        dc = self.dist_config
        mode = hookenv.config()['spark_execution_mode']
        master_ip = utils.resolve_private_address(
            available_hosts['spark-master'])
        master_url = self.get_master_url(master_ip)
        req_driver_mem = hookenv.config()['driver_memory']
        req_executor_mem = hookenv.config()['executor_memory']
        if mode.startswith('yarn'):
            spark_events = 'hdfs://{}'.format(dc.path('spark_events'))
        else:
            spark_events = 'file://{}'.format(dc.path('spark_events'))

        # handle tuning options that may be set as percentages
        driver_mem = '1g'
        executor_mem = '1g'
        if req_driver_mem.endswith('%'):
            if mode == 'standalone' or mode.startswith('local'):
                mem_mb = host.get_total_ram() / 1024 / 1024
                req_percentage = float(req_driver_mem.strip('%')) / 100
                driver_mem = str(int(mem_mb * req_percentage)) + 'm'
            else:
                hookenv.log(
                    "driver_memory percentage in non-local mode. "
                    "Using 1g default.",
                    level=hookenv.WARNING)
        else:
            driver_mem = req_driver_mem

        if req_executor_mem.endswith('%'):
            if mode == 'standalone' or mode.startswith('local'):
                mem_mb = host.get_total_ram() / 1024 / 1024
                req_percentage = float(req_executor_mem.strip('%')) / 100
                executor_mem = str(int(mem_mb * req_percentage)) + 'm'
            else:
                hookenv.log(
                    "executor_memory percentage in non-local mode. "
                    "Using 1g default.",
                    level=hookenv.WARNING)
        else:
            executor_mem = req_executor_mem

        # Some spark applications look for envars in /etc/environment
        with utils.environment_edit_in_place('/etc/environment') as env:
            env['MASTER'] = master_url
            env['SPARK_HOME'] = dc.path('spark_home')

        # Setup hosts dict
        hosts = {
            'spark': master_ip,
        }
        if 'namenode' in available_hosts:
            hosts['namenode'] = available_hosts['namenode']
        if 'resourcemanager' in available_hosts:
            hosts['resourcemanager'] = available_hosts['resourcemanager']

        # Setup roles dict. We always include the history server and client.
        # Determine other roles based on our execution mode.
        roles = ['spark-history-server', 'spark-client']
        if mode == 'standalone':
            roles.append('spark-master')
            roles.append('spark-worker')
        elif mode.startswith('yarn'):
            roles.append('spark-on-yarn')
            roles.append('spark-yarn-slave')

        # Setup overrides dict
        override = {
            'spark::common::master_url': master_url,
            'spark::common::event_log_dir': spark_events,
            'spark::common::history_log_dir': spark_events,
            'spark::common::extra_lib_dirs':
                ':'.join(extra_libs) if extra_libs else None,
            'spark::common::driver_mem': driver_mem,
            'spark::common::executor_mem': executor_mem,
        }
        if zk_units:
            zks = []
            for unit in zk_units:
                ip = utils.resolve_private_address(unit['host'])
                zks.append("%s:%s" % (ip, unit['port']))

            zk_connect = ",".join(zks)
            override['spark::common::zookeeper_connection_string'] = zk_connect
        else:
            override['spark::common::zookeeper_connection_string'] = None

        # Create our site.yaml and trigger puppet.
        # NB: during an upgrade, we configure the site.yaml, but do not
        # trigger puppet. The user must do that with the 'reinstall' action.
        bigtop = Bigtop()
        bigtop.render_site_yaml(hosts, roles, override)
        if unitdata.kv().get('spark.version.repo', False):
            hookenv.log(
                "An upgrade is available and the site.yaml has been "
                "configured. Run the 'reinstall' action to continue.",
                level=hookenv.INFO)
        else:
            bigtop.trigger_puppet()
            self.patch_worker_master_url(master_ip, master_url)

            # Packages don't create the event dir by default. Do it each time
            # spark is (re)installed to ensure location/perms are correct.
            self.configure_events_dir(mode)

        # Handle examples and Spark-Bench. Do this each time this method is
        # called in case we need to act on a new resource or user config.
        self.configure_examples()
        self.configure_sparkbench()
Example No. 55
    def configure_sparkbench(self):
        """
        Install/configure/remove Spark-Bench based on user config.

        If config[spark_bench_enabled], fetch, install, and configure
        Spark-Bench on initial invocation. Subsequent invocations will skip the
        fetch/install, but will reconfigure Spark-Bench since we may need to
        adjust the data dir (eg: benchmark data is stored in hdfs when spark
        is in yarn mode; locally in all other execution modes).
        """
        install_sb = hookenv.config()['spark_bench_enabled']
        sb_dir = '/home/ubuntu/SparkBench'
        if install_sb:
            # Fetch/install on our first go-round, then set unit data so we
            # don't reinstall every time this function is called.
            if not unitdata.kv().get('spark_bench.installed', False):
                sb_url = hookenv.config()['spark_bench_url']

                Path(sb_dir).rmtree_p()
                au = ArchiveUrlFetchHandler()
                au.install(sb_url, '/home/ubuntu')

                # NB: This block is unused when using one of our sb tgzs. It
                # may come in handy if people want a tgz that does not expand
                # to our expected sb_dir.
                # #####
                # Handle glob if we use a .tgz that doesn't expand to sb_dir
                # sb_archive_dir = glob('/home/ubuntu/SparkBench*')[0]
                # SparkBench expects to live in ~/SparkBench, so put it there
                # Path(sb_archive_dir).rename(sb_dir)
                # #####

                # Ensure users in the spark group can write to any subdirectory
                # of sb_dir (spark needs to write benchmark output there when
                # running in local modes).
                host.chownr(Path(sb_dir), 'ubuntu', 'spark', chowntopdir=True)
                for r, d, f in os.walk(sb_dir):
                    os.chmod(r, 0o2775)

                unitdata.kv().set('spark_bench.installed', True)
                unitdata.kv().flush(True)

            # Configure the SB env every time this function is called.
            sb_conf = '{}/conf'.format(sb_dir)
            sb_env = Path(sb_conf) / 'env.sh'
            if not sb_env.exists():
                (Path(sb_conf) / 'env.sh.template').copy(sb_env)

            # NB: A few notes on configuring SparkBench:
            # 1. Input data has been pregenerated and packed into the tgz. All
            # spark cluster members will have this data locally, which enables
            # us to execute benchmarks in the absence of HDFS. When spark is in
            # yarn mode, we'll need to generate and store this data in HDFS
            # so nodemanagers can access it (NMs obviously won't have SB
            # installed locally). Set DATA_HDFS to a local dir or common HDFS
            # location depending on our spark execution mode.
            #
            # 2. SB tries to SSH to spark workers to purge vmem caches. This
            # isn't possible in containers, nor is it possible in our env
            # because we don't distribute ssh keys among cluster members.
            # Set MC_LIST to an empty string to prevent this behavior.
            #
            # 3. Throughout SB, HADOOP_HOME/bin is used as the prefix for the
            # hdfs command. Bigtop's hdfs lives at /usr/bin/hdfs, so set the
            # SB HADOOP_HOME accordingly (it's not used for anything else).
            #
            # 4. Use our MASTER envar to set the SparkBench SPARK_MASTER url.
            # It is updated every time we (re)configure spark.
            mode = hookenv.config()['spark_execution_mode']
            if mode.startswith('yarn'):
                sb_data_dir = "hdfs:///user/ubuntu/SparkBench"
            else:
                sb_data_dir = "file://{}".format(sb_dir)

            utils.re_edit_in_place(
                sb_env, {
                    r'^DATA_HDFS *=.*': 'DATA_HDFS="{}"'.format(sb_data_dir),
                    r'^DATASET_DIR *=.*':
                    'DATASET_DIR="{}/dataset"'.format(sb_dir),
                    r'^MC_LIST *=.*': 'MC_LIST=""',
                    r'.*HADOOP_HOME *=.*': 'HADOOP_HOME="/usr"',
                    r'.*SPARK_HOME *=.*': 'SPARK_HOME="/usr/lib/spark"',
                    r'^SPARK_MASTER *=.*': 'SPARK_MASTER="$MASTER"',
                })
        else:
            # config[spark_bench_enabled] is false; remove it
            Path(sb_dir).rmtree_p()
            unitdata.kv().set('spark_bench.installed', False)
            unitdata.kv().flush(True)
Example No. 56
def service_cidr():
    ''' Return the charm's service-cidr config '''
    db = unitdata.kv()
    frozen_cidr = db.get('kubernetes-master.service-cidr')
    return frozen_cidr or hookenv.config('service-cidr')
Example No. 57
def post_series_upgrade():
    """Handler for post-series-upgrade.
    """
    unitdata.kv().set('charm.vault.series-upgrading', False)
Example No. 58
def freeze_service_cidr():
    ''' Freeze the service CIDR. Once the apiserver has started, we can no
    longer safely change this value. '''
    db = unitdata.kv()
    db.set('kubernetes-master.service-cidr', service_cidr())
    def render_and_restart():
        rabbit.ConfigRenderer(rabbit.CONFIG_FILES()).write_all()

    render_and_restart()
    update_clients()


@hooks.hook('update-status')
@harden()
def update_status():
    log('Updating status.')


if __name__ == '__main__':
    try:
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log('Unknown hook {} - skipping.'.format(e))
    # This solves one off problems waiting for the cluster to complete
    # It will get executed only once as soon as leader_node_is_ready()
    # or client_node_is_ready() returns True
    # Subsequent client requests will be handled by normal
    # amqp-relation-changed hooks
    kvstore = kv()
    if not kvstore.get(INITIAL_CLIENT_UPDATE_KEY, False):
        log("Rerunning update_clients as initial update not yet performed",
            level=DEBUG)
        update_clients()

    rabbit.assess_status(rabbit.ConfigRenderer(rabbit.CONFIG_FILES()))
Example No. 60
def _add_override(self, name, value):
    unitdata.kv().update({
        name: value,
    }, prefix='zeppelin.bigtop.overrides.')
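Because these overrides are written under a key prefix, they can be read back as a group with the standard unitdata API. A minimal sketch, assuming the same prefix as above:

# Returns a dict of all keys stored with the prefix; strip=True removes the
# 'zeppelin.bigtop.overrides.' prefix from the returned keys.
overrides = unitdata.kv().getrange('zeppelin.bigtop.overrides.', strip=True)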