Beispiel #1
0
def main(relation_name=None):
    """
    Entry point for the reactive framework.

    Discovers and loads all reactive handlers via :func:`~bus.discover`
    (e.g. :func:`@when <decorators.when>` decorated blocks), then runs
    :func:`~bus.dispatch` until the hook and state handlers settle out.
    Finally
    :meth:`unitdata.kv().flush <charmhelpers.core.unitdata.Storage.flush>`
    is called to persist the state.

    :param str relation_name: Optional name of the relation which is being handled.
    """
    hookenv.log('Reactive main running for hook %s' % hookenv.hook_name(), level=hookenv.INFO)

    # work-around for https://bugs.launchpad.net/juju-core/+bug/1503039
    # ensure that external handlers can tell what hook they're running in
    os.environ.setdefault('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))

    def _flush_kv():
        # Flush only when the key/value store was actually initialized.
        if unitdata._KV:
            unitdata._KV.flush()

    hookenv.atexit(_flush_kv)
    try:
        bus.discover()
        hookenv._run_atstart()
        bus.dispatch()
    except SystemExit as exc:
        # A clean exit (code 0 or None) still runs the atexit callbacks
        # before the SystemExit propagates.
        if exc.code in (None, 0):
            hookenv._run_atexit()
        raise
    hookenv._run_atexit()
Beispiel #2
0
    def initialize(self):
        """Lazily set up relation and persisted state for this object.

        Idempotent: returns immediately if ``self.requests`` is already
        populated.  Resolves the peer relation name and id, loads state
        into ``self.requests``/``self.grants`` via ``_load_state()``, and
        registers atexit callbacks to save state and release granted
        locks when the hook completes.
        """
        if self.requests is not None:
            return  # Already initialized.

        # Bail out early on Juju versions lacking the features we need.
        assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+'

        if self.relname is None:
            self.relname = _implicit_peer_relation_name()

        # Pick the relation id deterministically (sorted, first entry).
        relids = hookenv.relation_ids(self.relname)
        if relids:
            self.relid = sorted(relids)[0]

        # Load our state, from leadership, the peer relationship, and maybe
        # local state as a fallback. Populates self.requests and self.grants.
        self._load_state()
        self._emit_state()

        # Save our state if the hook completes successfully.
        hookenv.atexit(self._save_state)

        # Schedule release of granted locks for the end of the hook.
        # This needs to be the last of our atexit callbacks to ensure
        # it will be run first when the hook is complete, because there
        # is no point mutating our state after it has been saved.
        hookenv.atexit(self._release_granted)
def init_config_states():
    """Set reactive states describing the charm's configuration.

    For every option declared in ``config.yaml``:

    * sets ``config.changed`` and ``config.changed.<opt>`` when the
      option's value changed during this hook,
    * toggles ``config.set.<opt>`` on whether the value is truthy,
    * toggles ``config.default.<opt>`` on whether the value equals its
      declared default.

    Registers an atexit handler to clear these transient states at the
    end of the hook.
    """
    import yaml
    from charmhelpers.core import hookenv
    from charms.reactive import set_state
    from charms.reactive import toggle_state

    config = hookenv.config()

    config_defaults = {}
    config_defs = {}
    config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
    if os.path.exists(config_yaml):
        with open(config_yaml) as fp:
            # An empty config.yaml parses to None; guard with `or {}` so
            # we don't crash with AttributeError on .get().
            config_defs = (yaml.safe_load(fp) or {}).get('options', {})
            config_defaults = {
                key: value.get('default')
                for key, value in config_defs.items()
            }
    for opt in config_defs.keys():
        if config.changed(opt):
            set_state('config.changed')
            set_state('config.changed.{}'.format(opt))
        toggle_state('config.set.{}'.format(opt), config.get(opt))
        toggle_state('config.default.{}'.format(opt),
                     config.get(opt) == config_defaults[opt])
    hookenv.atexit(clear_config_states)
def upgrade_charm():
    """Reset worker state after a charm upgrade.

    Flags the install source as changed for the duration of this hook,
    cleans up pre-snap services, checks whether resources need an
    upgrade, removes the legacy nginx ingress RC, re-detects GPU
    support, and clears derived states so their handlers run again,
    finishing with a restart request.
    """
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    if is_state('kubernetes-worker.gpu.enabled'):
        remove_state('kubernetes-worker.gpu.enabled')
        try:
            disable_gpu()
        except ApplyNodeLabelFailed:
            # Removing node label failed. Probably the master is unavailable.
            # Proceed with the upgrade in hope GPUs will still be there.
            hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')

    # Clear derived states so their handlers re-run against the new charm.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
Beispiel #5
0
def main(relation_name=None):
    """
    Entry point for the reactive framework.

    Runs :func:`~bus.discover` to find and load all reactive handlers
    (e.g. :func:`@when <decorators.when>` decorated blocks), then
    :func:`~bus.dispatch` to fire hook and state handlers until the
    state settles out, and finally persists state via
    :meth:`unitdata.kv().flush <charmhelpers.core.unitdata.Storage.flush>`.

    :param str relation_name: Optional name of the relation which is being handled.
    """
    hookenv.log('Reactive main running for hook %s' % hookenv.hook_name(), level=hookenv.INFO)

    def _flush_kv():
        # Flush only when the key/value store was actually initialized.
        if unitdata._KV:
            unitdata._KV.flush()

    hookenv.atexit(_flush_kv)
    try:
        bus.discover()
        bus.dispatch()
    except SystemExit as exc:
        # A clean exit (code 0 or None) still runs the atexit callbacks
        # before the SystemExit propagates.
        if exc.code in (None, 0):
            hookenv._run_atexit()
        raise
    hookenv._run_atexit()
    def initialize(self):
        """Lazily set up relation and persisted state for this object.

        Idempotent: returns immediately if ``self.requests`` is already
        populated.  Resolves the peer relation name and id, loads state
        into ``self.requests``/``self.grants``, and registers atexit
        callbacks to save state and release granted locks at hook end.
        """
        if self.requests is not None:
            return  # Already initialized.

        # Bail out early on Juju versions lacking the features we need.
        assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+'

        if self.relname is None:
            self.relname = _implicit_peer_relation_name()

        # Pick the relation id deterministically (sorted, first entry).
        relids = hookenv.relation_ids(self.relname)
        if relids:
            self.relid = sorted(relids)[0]

        # Load our state, from leadership, the peer relationship, and maybe
        # local state as a fallback. Populates self.requests and self.grants.
        self._load_state()
        self._emit_state()

        # Save our state if the hook completes successfully.
        hookenv.atexit(self._save_state)

        # Schedule release of granted locks for the end of the hook.
        # This needs to be the last of our atexit callbacks to ensure
        # it will be run first when the hook is complete, because there
        # is no point mutating our state after it has been saved.
        hookenv.atexit(self._release_granted)
Beispiel #7
0
def upgrade_charm():
    """Reset worker state after a charm upgrade.

    Flags the install source as changed for the duration of this hook,
    cleans up pre-snap services, checks whether resources need an
    upgrade, removes the legacy nginx ingress RC, re-detects GPU
    support, and clears derived states so their handlers run again,
    finishing with a restart request.
    """
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    if is_state('kubernetes-worker.gpu.enabled'):
        remove_state('kubernetes-worker.gpu.enabled')
        try:
            disable_gpu()
        except ApplyNodeLabelFailed:
            # Removing node label failed. Probably the master is unavailable.
            # Proceed with the upgrade in hope GPUs will still be there.
            hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')

    # Clear derived states so their handlers re-run against the new charm.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
Beispiel #8
0
    def _startup(cls):
        """Migrate legacy data and schedule departed-conversation cleanup."""
        # update data to be backwards compatible after fix for issue 28
        _migrate_conversations()

        if not hookenv.hook_name().endswith('-relation-departed'):
            return

        def _depart():
            # Call depart() on the conversation of the departing relation.
            cls(hookenv.relation_type()).conversation().depart()

        hookenv.atexit(_depart)
    def set_trigger_like_state(self, state):
        """Set *state* and automatically unset it at the end of the hook.

        States set via this helper behave somewhat like an event rather
        than a persistent state: they are active only for the remainder
        of the current hook invocation.
        """
        self.set_state(state)

        def cleanup_func():
            self.remove_state(state)

        atexit(cleanup_func)
Beispiel #10
0
def init_config_states(upgrade=False):
    """Flag every changed config option with reactive states.

    When *upgrade* is true, every option is treated as changed.  An
    atexit handler clears the transient states at the end of the hook.
    """
    from charmhelpers.core import hookenv
    from charms.reactive import set_state

    config = hookenv.config()
    for opt in config:
        if upgrade or config.changed(opt):
            set_state('config.changed')
            set_state('config.changed.{}'.format(opt))
    hookenv.atexit(clear_config_states)
 def changed(self):
     """Maintain connected/available/changed states for this relation."""
     self.set_state('{relation_name}.connected')
     if not self._controller_config_ready():
         self.remove_state('{relation_name}.available')
         return
     self.set_state('{relation_name}.available')
     if data_changed('config', self.get_config()):
         self.set_state('{relation_name}.changed')
         atexit(lambda: self.remove_state('{relation_name}.changed'))
Beispiel #12
0
def init_config_states():
    """Set ``config.changed`` states for every option changed this hook."""
    from charmhelpers.core import hookenv
    from charms.reactive import set_state

    config = hookenv.config()
    for opt in config:
        if not config.changed(opt):
            continue
        set_state('config.changed')
        set_state('config.changed.{}'.format(opt))
    hookenv.atexit(clear_config_states)
Beispiel #13
0
 def _update_states(self):
     """Track availability of nodes and flag data changes transiently."""
     if not self._nodes_ready():
         self.remove_state('{relation_name}.available')
         return
     if not self.get_nodes():
         return
     self.set_state('{relation_name}.available')
     if data_changed('nodes', self.get_nodes()):
         self.set_state('{relation_name}.changed')
         atexit(
             lambda: self.remove_state('{relation_name}.changed'))
Beispiel #14
0
def init_config_states():
    """Record changed and set states for every charm config option."""
    from charmhelpers.core import hookenv
    from charms.reactive import set_state
    from charms.reactive import toggle_state

    config = hookenv.config()
    for opt in config:
        changed = config.changed(opt)
        if changed:
            set_state('config.changed')
            set_state('config.changed.{}'.format(opt))
        toggle_state('config.set.{}'.format(opt), config[opt])
    hookenv.atexit(clear_config_states)
Beispiel #15
0
def upgrade_charm():
    """Migrate reactive flags and reset worker state after a charm upgrade.

    Renames legacy cloud flags, flags the install source as changed for
    this hook, migrates resource checksums, schedules removal of the old
    ingress RC, re-detects GPU support, republishes certificates when
    the relevant relations are connected, and clears derived states so
    their handlers re-run, finishing with a restart request.
    """
    # migrate to new flags
    if is_state('kubernetes-worker.restarted-for-cloud'):
        remove_state('kubernetes-worker.restarted-for-cloud')
        set_state('kubernetes-worker.cloud.ready')
    if is_state('kubernetes-worker.cloud-request-sent'):
        # minor change, just for consistency
        remove_state('kubernetes-worker.cloud-request-sent')
        set_state('kubernetes-worker.cloud.request-sent')

    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    migrate_resource_checksums(checksum_prefix, snap_resources)
    if check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
        set_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        set_state('kubernetes-worker.remove-old-ingress')

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    if is_state('kubernetes-worker.gpu.enabled'):
        remove_state('kubernetes-worker.gpu.enabled')
        try:
            disable_gpu()
        except ApplyNodeLabelFailed:
            # Removing node label failed. Probably the master is unavailable.
            # Proceed with the upgrade in hope GPUs will still be there.
            hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')

    # Mirror the current 'ingress' config option into a flag.
    if hookenv.config('ingress'):
        set_state('kubernetes-worker.ingress.enabled')
    else:
        remove_state('kubernetes-worker.ingress.enabled')

    # force certs to be updated
    if is_state('certificates.available') and \
       is_state('kube-control.connected'):
        send_data()

    if is_state('kubernetes-worker.registry.configured'):
        set_state('kubernetes-master-worker-base.registry.configured')
        remove_state('kubernetes-worker.registry.configured')

    # Clear derived states so their handlers re-run against the new charm.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
Beispiel #16
0
def init_config_states():
    """Record changed and set states for every charm config option."""
    from charmhelpers.core import hookenv
    from charms.reactive import set_state
    from charms.reactive import toggle_state

    config = hookenv.config()
    for opt in config:
        changed = config.changed(opt)
        if changed:
            set_state("config.changed")
            set_state("config.changed.{}".format(opt))
        toggle_state("config.set.{}".format(opt), config[opt])
    hookenv.atexit(clear_config_states)
Beispiel #17
0
    def assess_status(self):
        """Defer a single ``_assess_status()`` run to hook exit.

        May be called any number of times during the hook; only the
        first call registers the atexit callback, so ``_assess_status()``
        runs exactly once after all handlers have completed.
        """
        if self.__run_assess_status:
            return
        self.__run_assess_status = True

        def _deferred_assess_status():
            hookenv.log("Running _assess_status()", level=hookenv.DEBUG)
            self._assess_status()

        hookenv.atexit(_deferred_assess_status)
def default_update_status():
    """Default handler for update-status state.

    Sets ``run-default-update-status`` so the default update-status
    handler can be called, raises ``is-update-status-hook`` to mark the
    current hook as an update-status hook, and registers an atexit
    callback that lowers the flag again at the end of the hook.
    """
    reactive.set_flag('run-default-update-status')
    reactive.set_flag('is-update-status-hook')

    def _clear_update_status_flag():
        reactive.clear_flag('is-update-status-hook')

    hookenv.atexit(_clear_update_status_flag)
Beispiel #19
0
    def configure(self, port):
        """Publish hostname/port on the relation and track data changes.

        :param int port: Port to advertise to the remote side.
        """
        previous = {
            'hostname': self.get_local('hostname'),
            'port': self.get_local('port')
        }
        current = {'hostname': hookenv.unit_private_ip(), 'port': port}
        self.set_remote(**current)
        self.set_state(self.states.configured)
        hookenv.log('basic-auth-check relation configured')

        if current == previous:
            return
        self.set_local(**current)
        self.set_state(self.states.changed)
        hookenv.log('basic-auth-check relation data changed')
        hookenv.atexit(self._clean_state_changed)
def upgrade_charm():
    """Reset worker state after a charm upgrade.

    Flags the install source as changed for the duration of this hook,
    cleans up pre-snap services, checks whether resources need an
    upgrade, and clears GPU/CNI/config/ingress states so their handlers
    run again, finishing with a restart request.
    """
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    remove_state('kubernetes-worker.gpu.enabled')

    # Clear derived states so their handlers re-run against the new charm.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    set_state('kubernetes-worker.restart-needed')
Beispiel #21
0
def update_status():
    """Mark the current hook as an update-status hook.

    Raises the ``is-update-status-hook`` flag for the duration of the
    hook and registers an atexit callback that lowers it again.  This
    can be used to gate handlers that should not run during the
    light-weight update-status hook.
    """
    set_flag('is-update-status-hook')

    def _lower_update_status_flag():
        clear_flag('is-update-status-hook')

    atexit(_lower_update_status_flag)
Beispiel #22
0
def upgrade_charm():
    """Reset worker state after a charm upgrade.

    Flags the install source as changed for the duration of this hook,
    cleans up pre-snap services, checks whether resources need an
    upgrade, and clears GPU/CNI/config/ingress states so their handlers
    run again, finishing with a restart request.
    """
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    remove_state('kubernetes-worker.gpu.enabled')

    # Clear derived states so their handlers re-run against the new charm.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    set_state('kubernetes-worker.restart-needed')
Beispiel #23
0
 def _check_state_changed(self, conv):
     """Flag the conversation as changed when its relation data moved."""
     previous = {
         'hostname': conv.get_local('hostname'),
         'port': conv.get_local('port')
     }
     current = {
         'hostname': conv.get_remote('hostname'),
         'port': conv.get_remote('port')
     }
     relation_gone = not conv.is_state(self.states.available)
     if current == previous and not relation_gone:
         return
     conv.set_local(**current)
     conv.set_state(self.states.changed)
     hookenv.log(
         'basic-auth-check relation data changed: {}'.format(
             current))
     hookenv.atexit(self._clean_state_changed)
Beispiel #24
0
def main(relation_name=None):
    """
    This is the main entry point for the reactive framework.  It calls
    :func:`~bus.discover` to find and load all reactive handlers (e.g.,
    :func:`@when <decorators.when>` decorated blocks), and then
    :func:`~bus.dispatch` to trigger hook and state handlers until the
    state settles out.  Finally,
    :meth:`unitdata.kv().flush <charmhelpers.core.unitdata.Storage.flush>`
    is called to persist the state.

    :param str relation_name: Optional name of the relation which is being handled.
    """
    hookenv.log('Reactive main running for hook %s' % hookenv.hook_name(),
                level=hookenv.INFO)

    # work-around for https://bugs.launchpad.net/juju-core/+bug/1503039
    # ensure that external handlers can tell what hook they're running in
    if 'JUJU_HOOK_NAME' not in os.environ:
        os.environ['JUJU_HOOK_NAME'] = os.path.basename(sys.argv[0])

    # update data to be backwards compatible after fix for issue 28
    relations._migrate_conversations()

    # Persist the unit's key/value store when the hook finishes
    # (only if it was actually initialized).
    def flush_kv():
        if unitdata._KV:
            unitdata._KV.flush()

    hookenv.atexit(flush_kv)
    # For a departing relation, call depart() on its conversation at the
    # end of the hook.
    if hookenv.hook_name().endswith('-relation-departed'):

        def depart_conv():
            rel = RelationBase.from_name(hookenv.relation_type())
            rel.conversation().depart()

        hookenv.atexit(depart_conv)
    try:
        bus.discover()
        hookenv._run_atstart()
        bus.dispatch()
    except SystemExit as x:
        # A clean SystemExit (code 0 or None) still runs the atexit
        # callbacks before the exception propagates.
        if x.code is None or x.code == 0:
            hookenv._run_atexit()
        raise
    hookenv._run_atexit()
Beispiel #25
0
    def _startup(cls):
        """
        Create Endpoint instances and manage automatic flags.

        Iterates all relation types, instantiates the matching Endpoint
        subclass for each, registers it in ``cls._endpoints``, and
        schedules each relation's data flush for the end of the hook.
        """
        for endpoint_name in sorted(hookenv.relation_types()):
            # populate context based on attached relations
            relf = relation_factory(endpoint_name)
            if not relf or not issubclass(relf, cls):
                continue

            rids = sorted(hookenv.relation_ids(endpoint_name))
            # ensure that relation IDs have the endpoint name prefix, in case
            # juju decides to drop it at some point
            rids = ['{}:{}'.format(endpoint_name, rid) if ':' not in rid
                    else rid for rid in rids]
            endpoint = relf(endpoint_name, rids)
            cls._endpoints[endpoint_name] = endpoint
            endpoint._manage_departed()
            endpoint._manage_flags()
            for relation in endpoint.relations:
                # Defer writing relation data until the hook completes.
                hookenv.atexit(relation._flush_data)
Beispiel #26
0
def init_config_states():
    """Set reactive states describing the charm's configuration.

    Sets ``config.changed``/``config.changed.<opt>`` for options changed
    during this hook, and toggles ``config.set.<opt>`` (truthy value)
    and ``config.default.<opt>`` (value equals the declared default)
    for every option.  Registers an atexit handler to clear the
    transient states at the end of the hook.
    """
    import yaml
    from charmhelpers.core import hookenv
    from charms.reactive import set_state
    from charms.reactive import toggle_state
    config = hookenv.config()
    config_defaults = {}
    config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
    if os.path.exists(config_yaml):
        with open(config_yaml) as fp:
            # safe_load avoids arbitrary object construction from YAML
            # (yaml.load without a Loader is unsafe and deprecated);
            # an empty file parses to None, hence the `or {}` guard.
            config_defs = (yaml.safe_load(fp) or {}).get('options', {})
            config_defaults = {key: value.get('default')
                               for key, value in config_defs.items()}
    for opt in config.keys():
        if config.changed(opt):
            set_state('config.changed')
            set_state('config.changed.{}'.format(opt))
        toggle_state('config.set.{}'.format(opt), config[opt])
        # .get() guards against options present in the live config but
        # missing from config.yaml (plain indexing would raise KeyError).
        toggle_state('config.default.{}'.format(opt),
                     config[opt] == config_defaults.get(opt))
    hookenv.atexit(clear_config_states)
Beispiel #27
0
def upgrade_charm():
    """Reset worker state after a charm upgrade.

    Flags the install source as changed for the duration of this hook,
    cleans up pre-snap services, checks whether resources need an
    upgrade, removes the legacy nginx ingress RC, and clears derived
    states so their handlers run again, finishing with a restart
    request.
    """
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    remove_state('kubernetes-worker.gpu.enabled')

    # Clear derived states so their handlers re-run against the new charm.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
Beispiel #28
0
def upgrade_charm():
    """Reset worker state after a charm upgrade.

    Flags the install source as changed for the duration of this hook,
    cleans up pre-snap services, checks whether resources need an
    upgrade, removes the legacy nginx ingress RC, and clears derived
    states so their handlers run again, finishing with a restart
    request.
    """
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    remove_state('kubernetes-worker.gpu.enabled')

    # Clear derived states so their handlers re-run against the new charm.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
Beispiel #29
0
 def changed(self):
     """Flag the relation data as updated; the flag is cleared at hook exit."""
     self.set_state('{relation_name}.available.updated')
     hookenv.atexit(self._clear_updated)
Beispiel #30
0
def install_tracer(tracer):
    """Install *tracer* as the module-level active tracer.

    :param tracer: Tracer instance to record subsequent events.
    """
    global _tracer
    _tracer = tracer
    # Disable tracing when we hit atexit, to avoid spam from layers
    # such as when the base layer tears down its automatic flags.
    hookenv.atexit(install_tracer, NullTracer())
Beispiel #31
0
 def test_manage_calls_atexit(self):
     """manage() must invoke callbacks registered via hookenv.atexit."""
     callback = mock.MagicMock()
     hookenv.atexit(callback)
     services.ServiceManager().manage()
     self.assertTrue(callback.called)
Beispiel #32
0
def _initialize():
    """One-time setup: optionally patch hookenv and register the finalizer."""
    if _statuses['_initialized']:
        return
    if layer.options.get('status', 'patch-hookenv'):
        _patch_hookenv()
    hookenv.atexit(_finalize)
    _statuses['_initialized'] = True
Beispiel #33
0
        return
    else:
        if data_changed("layer.vault-kv.config", config):
            set_flag("layer.vault-kv.config.changed")


def manage_app_kv_flags():
    """Sync flags for each key in the app KV store.

    If Vault is not ready yet, all related flags are cleared instead.
    """
    try:
        store = vault_kv.VaultAppKV()
        for key in store.keys():
            store._manage_flags(key)
    except vault_kv.VaultNotReady:
        vault_kv.VaultAppKV._clear_all_flags()


def update_app_kv_hashes():
    """Persist updated hashes for changed app KV entries.

    The leader also bumps a nonce so non-leader units see a hook fire.
    No-op while Vault is not yet ready.
    """
    try:
        store = vault_kv.VaultAppKV()
        if not store.any_changed():
            return
        if hookenv.is_leader():
            # force hooks to run on non-leader units
            hookenv.leader_set({"vault-kv-nonce": host.pwgen(8)})
        # Update the local unit hashes at successful exit
        store.update_hashes()
    except vault_kv.VaultNotReady:
        return


# Module-level registration: sync KV flags when the hook starts and
# persist updated hashes when it finishes.
hookenv.atstart(manage_app_kv_flags)
hookenv.atexit(update_app_kv_hashes)
Beispiel #34
0
def log_states():
    '''Log active states to aid debugging'''
    active = reactive.helpers.get_states()
    for state in sorted(active):
        hookenv.log('Reactive state: {}'.format(state), DEBUG)


def emit_deprecated_option_warnings():
    '''Warn about any deprecated configuration settings still in use.'''
    deprecated = sorted(helpers.deprecated_config_in_use())
    if not deprecated:
        return
    hookenv.log('Deprecated configuration settings in use: {}'
                ''.format(', '.join(deprecated)), WARNING)


# emit_deprecated_option_warnings is called at the end of the hook
# so that the warnings appear clearly at the end of the logs.
hookenv.atexit(emit_deprecated_option_warnings)


@when_not('postgresql.cluster.locale.set')
def generate_locale():
    '''Ensure that the requested database locale is available.

    The locale cannot be changed post deployment, as this would involve
    completely destroying and recreding the database.
    '''
    config = hookenv.config()
    if config['locale'] != 'C':
        status_set('maintenance',
                   'Generating {} locale'.format(config['locale']))
        subprocess.check_call(['locale-gen',
                               '{}.{}'.format(hookenv.config('locale'),
Beispiel #35
0
def prime_assess_status():
    """Defer a status assessment to the end of the current hook."""
    atexit(_assess_status)
Beispiel #36
0
    try:
        result = subprocess.check_output(cmd).decode('utf-8')
        return result.split()
    except subprocess.CalledProcessError:
        return []


def log_mds():
    """Return a (status, message) tuple describing MDS availability."""
    if not relation_ids('ceph-mds'):
        return 'blocked', 'Missing relation: monitor'
    running_mds = get_running_mds()
    if not running_mds:
        return 'blocked', 'No MDS detected using current configuration'
    return 'active', 'Unit is ready ({} MDS)'.format(len(running_mds))


# Per https://github.com/juju-solutions/charms.reactive/issues/33,
# this module may be imported multiple times so ensure the
# initialization hook is only registered once. I have to piggy back
# onto the namespace of a module imported before reactive discovery
# to do this.
if not hasattr(reactive, '_ceph_log_registered'):
    # We need to register this to run every hook, not just during install
    # and config-changed, to protect against race conditions. If we don't
    # do this, then the config in the hook environment may show updates
    # to running hooks well before the config-changed hook has been invoked
    # and the initialization provided an opportunity to be run.
    hookenv.atexit(assess_status)
    reactive._ceph_log_registered = True
Beispiel #37
0
        if not any(map(state.startswith, blacklist)):
            hookenv.log("Reactive state: {}".format(state), DEBUG)


def emit_deprecated_option_warnings():
    """Warn about any deprecated configuration settings still in use."""
    deprecated = sorted(helpers.deprecated_config_in_use())
    if not deprecated:
        return
    hookenv.log(
        "Deprecated configuration settings in use: {}".format(", ".join(deprecated)),
        WARNING,
    )


# emit_deprecated_option_warnings is called at the end of the hook
# so that the warnings appear clearly at the end of the logs.
hookenv.atexit(emit_deprecated_option_warnings)


@when_not("postgresql.cluster.locale.set")
def generate_locale():
    """Ensure that the requested database locale is available.

    The locale cannot be changed post deployment, as this would involve
    completely destroying and recreding the database.
    """
    config = hookenv.config()
    if config["locale"] != "C":
        status_set("maintenance", "Generating {} locale".format(config["locale"]))
        subprocess.check_call(
            [
                "locale-gen",
    if not running_osds:
        return ('blocked',
                'No block devices detected using current configuration')
    else:
        return ('active',
                'Unit is ready ({} OSD)'.format(len(running_osds)))


def log_mds():
    """Return a (status, message) tuple describing MDS availability."""
    if not relation_ids('mon'):
        return 'blocked', 'Missing relation: monitor'
    running_mds = get_running_mds()
    if not running_mds:
        return 'blocked', 'No MDS detected using current configuration'
    return 'active', 'Unit is ready ({} MDS)'.format(len(running_mds))

# Per https://github.com/juju-solutions/charms.reactive/issues/33,
# this module may be imported multiple times so ensure the
# initialization hook is only registered once. I have to piggy back
# onto the namespace of a module imported before reactive discovery
# to do this.
if not hasattr(reactive, '_ceph_log_registered'):
    # We need to register this to run every hook, not just during install
    # and config-changed, to protect against race conditions. If we don't
    # do this, then the config in the hook environment may show updates
    # to running hooks well before the config-changed hook has been invoked
    # and the initialization provided an opportunity to be run.
    hookenv.atexit(assess_status)
    reactive._ceph_log_registered = True
Beispiel #39
0
def upgrade_charm():
    """Migrate reactive flags and reset worker state after a charm upgrade.

    Renames legacy cloud/snap flags, flags the install source as changed
    for this hook, migrates resource checksums, schedules removal of the
    old ingress, re-detects GPU support, republishes certificates when
    the required relations/flags are all present, refreshes CNI and
    kube-control flags, removes stale kubelet dynamic config, and clears
    derived states so their handlers re-run, finishing with a restart
    request.
    """
    # migrate to new flags
    if is_state("kubernetes-worker.restarted-for-cloud"):
        remove_state("kubernetes-worker.restarted-for-cloud")
        set_state("kubernetes-worker.cloud.ready")
    if is_state("kubernetes-worker.cloud-request-sent"):
        # minor change, just for consistency
        remove_state("kubernetes-worker.cloud-request-sent")
        set_state("kubernetes-worker.cloud.request-sent")
    if is_state("kubernetes-worker.snaps.installed"):
        # consistent with layer-kubernetes-node-base
        remove_state("kubernetes-worker.snaps.installed")
        set_state("kubernetes-node.snaps.installed")

    # Treat the install source as changed for the duration of this hook.
    set_state("config.changed.install_from_upstream")
    hookenv.atexit(remove_state, "config.changed.install_from_upstream")

    cleanup_pre_snap_services()
    migrate_resource_checksums(checksum_prefix, snap_resources)
    if check_resources_for_upgrade_needed(checksum_prefix, snap_resources):
        set_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get("ingress"):
        set_state("kubernetes-worker.remove-old-ingress")

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    if is_state("kubernetes-worker.gpu.enabled"):
        remove_state("kubernetes-worker.gpu.enabled")
        try:
            disable_gpu()
        except LabelMaker.NodeLabelError:
            # Removing node label failed. Probably the control-plane is unavailable.
            # Proceed with the upgrade in hope GPUs will still be there.
            hookenv.log("Failed to remove GPU labels. Proceed with upgrade.")

    # Mirror the current 'ingress' config option into a flag.
    if hookenv.config("ingress"):
        set_state("kubernetes-worker.ingress.enabled")
    else:
        remove_state("kubernetes-worker.ingress.enabled")

    # force certs to be updated
    if all(
        is_state(flag)
        for flag in (
            "certificates.available",
            "kube-control.connected",
            "cni.available",
            "kube-control.dns.available",
        )
    ):
        send_data()

    if is_state("kubernetes-worker.registry.configured"):
        set_state("kubernetes-master-worker-base.registry.configured")
        remove_state("kubernetes-worker.registry.configured")

    # need to clear cni.available state if it's no longer accurate
    if is_state("cni.available"):
        cni = endpoint_from_flag("cni.available")
        if not cni.config_available():
            hookenv.log(
                "cni.config_available() is False, clearing" + " cni.available flag"
            )
            remove_state("cni.available")

    # need to bump the kube-control relation in case
    # kube-control.default_cni.available is not set when it should be
    if is_state("kube-control.connected"):
        kube_control = endpoint_from_flag("kube-control.connected")
        kube_control.manage_flags()

    # Old dynamic kubelet config is no longer used; best-effort removal.
    shutil.rmtree("/root/cdk/kubelet/dynamic-config", ignore_errors=True)

    # kubernetes-worker.cni-plugins.installed flag is deprecated but we still
    # want to clean it up
    remove_state("kubernetes-worker.cni-plugins.installed")

    # Clear derived states so their handlers re-run against the new charm.
    remove_state("kubernetes-worker.config.created")
    remove_state("kubernetes-worker.ingress.available")
    remove_state("worker.auth.bootstrapped")
    remove_state("nfs.configured")
    set_state("kubernetes-worker.restart-needed")
 def __init__(self, *args, **kwargs):
     # Ensure the transient 'triggered' state is cleared at hook exit.
     super().__init__(*args, **kwargs)
     hookenv.atexit(lambda: self.remove_state('{relation_name}.triggered'))