def wrapper(servicename, *args, **kw):
    if hookenv.remote_unit():
        hookenv.log("** Action {}/{} ({})".format(hookenv.hook_name(),
                                                  func.__name__,
                                                  hookenv.remote_unit()))
    else:
        hookenv.log("** Action {}/{}".format(hookenv.hook_name(),
                                             func.__name__))
    return func(*args, **kw)
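The func and servicename names above come from an enclosing decorator that the excerpt omits; a minimal sketch of what that factory presumably looks like (the log_action name is hypothetical):

import functools

from charmhelpers.core import hookenv


def log_action(func):
    """Sketch: log entry into an action handler, tagged with the current
    hook and, for relation hooks, the remote unit."""
    @functools.wraps(func)
    def wrapper(servicename, *args, **kw):
        if hookenv.remote_unit():
            hookenv.log("** Action {}/{} ({})".format(hookenv.hook_name(),
                                                      func.__name__,
                                                      hookenv.remote_unit()))
        else:
            hookenv.log("** Action {}/{}".format(hookenv.hook_name(),
                                                 func.__name__))
        # servicename is consumed here; the wrapped helper does not
        # receive it, matching the excerpt above.
        return func(*args, **kw)
    return wrapper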
Example #3
def set_cp_agent():
    juju_log('Setting cp-agentd configuration for {} hook'.format(hook_name()))
    mport = 0
    for rid in relation_ids('cplane-controller'):
        for unit in related_units(rid):
            mport = relation_get(attribute='mport', unit=unit, rid=rid)
            uport = relation_get(attribute='uport', unit=unit, rid=rid)
            unicast_mode = config('enable-unicast')
            cplane_controller = relation_get(attribute='private-address',
                                             unit=unit, rid=rid)
            if mport:
                key = 'mcast-port=' + mport
                cmd = ['cp-agentd', 'set-config', key]
                subprocess.check_call(cmd)
                key = 'mgmt-iface=' + config('mgmt-int')
                cmd = ['cp-agentd', 'set-config', key]
                subprocess.check_call(cmd)
                if unicast_mode is True:
                    key = 'ucast-ip=' + cplane_controller
                    cmd = ['cp-agentd', 'set-config', key]
                    subprocess.check_call(cmd)
                else:
                    cmd = "sed -i '/ucast-ip/d' /etc/cplane/cp-config.json"
                    os.system(cmd)
                key = 'ucast-port=' + uport
                cmd = ['cp-agentd', 'set-config', key]
                subprocess.check_call(cmd)
                key = 'log-level=' + str(config('cp-agent-log-level'))
                with open('/etc/cplane/cp-config.json', 'r') as file:
                    filedata = file.read()
                if '"{}"'.format(config('cp-agent-log-level')) not in filedata:
                    cmd = ['cp-agentd', 'set-config', key]
                    subprocess.check_call(cmd)
                key = 'vm-mtu=' + str(config('cp-vm-mtu'))
                cmd = ['cp-agentd', 'set-config', key]
                subprocess.check_call(cmd)

                return

    key = 'mcast-port=' + str(config('cp-controller-mport'))
    cmd = ['cp-agentd', 'set-config', key]
    subprocess.check_call(cmd)
    key = 'mgmt-iface=' + config('mgmt-int')
    cmd = ['cp-agentd', 'set-config', key]
    subprocess.check_call(cmd)
    key = 'ucast-ip=' + config('cplane-controller-ip')
    cmd = ['cp-agentd', 'set-config', key]
    subprocess.check_call(cmd)
    key = 'ucast-port=' + str(config('cp-controller-uport'))
    cmd = ['cp-agentd', 'set-config', key]
    subprocess.check_call(cmd)
    key = 'log-level=' + str(config('cp-agent-log-level'))
    with open('/etc/cplane/cp-config.json', 'r') as file:
        filedata = file.read()
    if '"{}"'.format(config('cp-agent-log-level')) not in filedata:
        cmd = ['cp-agentd', 'set-config', key]
        subprocess.check_call(cmd)
    key = 'vm-mtu=' + str(config('cp-vm-mtu'))
    cmd = ['cp-agentd', 'set-config', key]
    subprocess.check_call(cmd)
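Every setting above repeats the same three-line cp-agentd set-config invocation; a small helper would keep that in one place (a sketch, assuming the same CLI; set_cp_config is a hypothetical name):

import subprocess


def set_cp_config(key, value):
    """Push one key=value setting to cp-agentd."""
    subprocess.check_call(['cp-agentd', 'set-config',
                           '{}={}'.format(key, value)])

# usage: set_cp_config('mgmt-iface', config('mgmt-int'))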
Example #4
    def write(self):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        # update-status hooks are configured to fire every 5 minutes by
        # default. When nagios-nrpe-server is restarted, the nagios server
        # reports checks as failing, causing unnecessary alerts. Let's not
        # restart on update-status hooks.
        if hook_name() != 'update-status':
            service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
Example #5
def ssh_authorized_peers(peer_interface, user, group=None,
                         ensure_local_user=False):
    """
    Main setup function, should be called from both peer -changed and -joined
    hooks with the same parameters.
    """
    if ensure_local_user:
        ensure_user(user, group)
    priv_key, pub_key = get_keypair(user)
    hook = hook_name()
    if hook == '%s-relation-joined' % peer_interface:
        relation_set(ssh_pub_key=pub_key)
    elif hook == '%s-relation-changed' % peer_interface:
        hosts = []
        keys = []

        for r_id in relation_ids(peer_interface):
            for unit in related_units(r_id):
                ssh_pub_key = relation_get('ssh_pub_key',
                                           rid=r_id,
                                           unit=unit)
                priv_addr = relation_get('private-address',
                                         rid=r_id,
                                         unit=unit)
                if ssh_pub_key:
                    keys.append(ssh_pub_key)
                    hosts.append(priv_addr)
                else:
                    log('ssh_authorized_peers(): ssh_pub_key '
                        'missing for unit %s, skipping.' % unit)
        write_authorized_keys(user, keys)
        write_known_hosts(user, hosts)
        authed_hosts = ':'.join(hosts)
        relation_set(ssh_authorized_hosts=authed_hosts)
Example #6
def decorated():
    hook_name = hookenv.hook_name()
    log('>>> Entering hook: {}.'.format(hook_name))
    try:
        return function()
    finally:
        log('<<< Exiting hook: {}.'.format(hook_name))
Example #7
def parse_hooks():
    if hookenv.hook_name() == 'upgrade-charm':
        kv = unitdata.kv()
        creds = kv.get('keystonecreds')
        kv.set('keystonecreds', creds)
        # render configs again
        do_reconfigure_nrpe()
Example #8
def ssh_authorized_peers(peer_interface,
                         user,
                         group=None,
                         ensure_local_user=False):
    """
    Main setup function, should be called from both peer -changed and -joined
    hooks with the same parameters.
    """
    if ensure_local_user:
        ensure_user(user, group)
    priv_key, pub_key = get_keypair(user)
    hook = hook_name()
    if hook == '%s-relation-joined' % peer_interface:
        relation_set(ssh_pub_key=pub_key)
    elif hook == '%s-relation-changed' % peer_interface or \
            hook == '%s-relation-departed' % peer_interface:
        hosts = []
        keys = []

        for r_id in relation_ids(peer_interface):
            for unit in related_units(r_id):
                ssh_pub_key = relation_get('ssh_pub_key', rid=r_id, unit=unit)
                priv_addr = relation_get('private-address',
                                         rid=r_id,
                                         unit=unit)
                if ssh_pub_key:
                    keys.append(ssh_pub_key)
                    hosts.append(priv_addr)
                else:
                    log('ssh_authorized_peers(): ssh_pub_key '
                        'missing for unit %s, skipping.' % unit)
        write_authorized_keys(user, keys)
        write_known_hosts(user, hosts)
        authed_hosts = ':'.join(hosts)
        relation_set(ssh_authorized_hosts=authed_hosts)
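As the docstring says, this is meant to be called with identical parameters from each peer hook; a hypothetical wiring with the charmhelpers Hooks dispatcher (the cluster relation name and myservice user are assumptions):

from charmhelpers.core.hookenv import Hooks

hooks = Hooks()


@hooks.hook('cluster-relation-joined',
            'cluster-relation-changed',
            'cluster-relation-departed')
def cluster_relation():
    # Same parameters from every peer hook, per the docstring above.
    ssh_authorized_peers('cluster', user='myservice', group='myservice',
                         ensure_local_user=True)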
Example #9
    def get_remote(self, key, default=None):
        """
        Get a value from the remote end(s) of this conversation.

        Note that if a conversation's scope encompasses multiple units, then
        those units are expected to agree on their data, whether that is through
        relying on a single leader to set the data or by all units eventually
        converging to identical data.  Thus, this method returns the first
        value that it finds set by any of its units.
        """
        cur_rid = hookenv.relation_id()
        departing = hookenv.hook_name().endswith('-relation-departed')
        for relation_id in self.relation_ids:
            units = hookenv.related_units(relation_id)
            if departing and cur_rid == relation_id:
                # Work around the fact that Juju 2.0 doesn't include the
                # departing unit in relation-list during the -departed hook,
                # by adding it back in ourselves.
                units.append(hookenv.remote_unit())
            for unit in units:
                if unit not in self.units:
                    continue
                value = hookenv.relation_get(key, unit, relation_id)
                if value:
                    return value
        return default
Example #11
def parse_hooks():
    if hookenv.hook_name() == 'upgrade-charm':
        # Check if creds storage needs to be migrated
        # Old key: keystone-relation-creds
        # New key: keystonecreds
        kv = unitdata.kv()
        creds = kv.get('keystonecreds')
        old_creds = kv.get('keystone-relation-creds')
        if old_creds and not creds:
            # This set of creds needs an update to a newer format
            creds = {
                'username': old_creds['credentials_username'],
                'password': old_creds['credentials_password'],
                'project_name': old_creds['credentials_project'],
                'tenant_name': old_creds['credentials_project'],
                'user_domain_name': old_creds.get('credentials_user_domain'),
                'project_domain_name':
                old_creds.get('credentials_project_domain'),
            }
            kv.set('keystonecreds', creds)

        if old_creds:
            kv.unset('keystone-relation-creds')

        # update rally check files and plugins, which may have changed
        helper.update_plugins()
        helper.update_rally_checkfiles()

        # render configs again
        do_reconfigure_nrpe()
Example #12
def manage():
    if hookenv.hook_name() == 'health':
        report_consolidated_health()
        return
    manager = services.ServiceManager([
        {
            'service': 'bundle',
            'required_data': [
                JujuAPICredentials(),
                ArtifactsCache(),
            ],
            'data_ready': [
                cache_unit_addresses,
                precache_job_artifacts,
                generate,
                deploy,
            ],
            'start': [],
            'stop': [],
        },
        {
            'service': 'nginx',
            'required_data': [{'charm_dir': hookenv.charm_dir(),
                               'config': hookenv.config()}],
            'provided_data': [OrchestratorRelation()],
            'data_ready': [
                services.render_template(
                    source='nginx.conf',
                    target='/etc/nginx/sites-enabled/artifact_proxy'),
            ],
        },
    ])
    manager.manage()
Example #13
def any_hook(*hook_patterns):
    """
    Assert that the currently executing hook matches one of the given patterns.

    Each pattern will match one or more hooks, and can use the following
    special syntax:

      * ``db-relation-{joined,changed}`` can be used to match multiple hooks
        (in this case, ``db-relation-joined`` and ``db-relation-changed``).
      * ``{provides:mysql}-relation-joined`` can be used to match a relation
        hook by the role and interface instead of the relation name.  The role
        must be one of ``provides``, ``requires``, or ``peer``.
      * The previous two can be combined, of course: ``{provides:mysql}-relation-{joined,changed}``
    """
    current_hook = hookenv.hook_name()

    # expand {role:interface} patterns
    i_pat = re.compile(r'{([^:}]+):([^}]+)}')
    hook_patterns = _expand_replacements(
        i_pat, hookenv.role_and_interface_to_relations, hook_patterns)

    # expand {A,B,C,...} patterns
    c_pat = re.compile(r'{((?:[^:,}]+,?)+)}')
    hook_patterns = _expand_replacements(c_pat, lambda v: v.split(','),
                                         hook_patterns)

    return current_hook in hook_patterns
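Callers typically use the return value to guard a handler body; for example (the db relation and configure_database handler are illustrative):

if any_hook('db-relation-{joined,changed}',
            '{provides:mysql}-relation-joined'):
    configure_database()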
Example #15
def main(relation_name=None):
    """
    This is the main entry point for the reactive framework.  It calls
    :func:`~bus.discover` to find and load all reactive handlers (e.g.,
    :func:`@when <decorators.when>` decorated blocks), and then
    :func:`~bus.dispatch` to trigger hook and state handlers until the
    state settles out.  Finally,
    :meth:`unitdata.kv().flush <charmhelpers.core.unitdata.Storage.flush>`
    is called to persist the state.

    :param str relation_name: Optional name of the relation which is being handled.
    """
    hookenv.log('Reactive main running for hook %s' % hookenv.hook_name(), level=hookenv.INFO)

    # work-around for https://bugs.launchpad.net/juju-core/+bug/1503039
    # ensure that external handlers can tell what hook they're running in
    if 'JUJU_HOOK_NAME' not in os.environ:
        os.environ['JUJU_HOOK_NAME'] = os.path.basename(sys.argv[0])

    def flush_kv():
        if unitdata._KV:
            unitdata._KV.flush()
    hookenv.atexit(flush_kv)
    try:
        bus.discover()
        hookenv._run_atstart()
        bus.dispatch()
    except SystemExit as x:
        if x.code is None or x.code == 0:
            hookenv._run_atexit()
        raise
    hookenv._run_atexit()
Example #17
def get_all_remote(conv, key):
    """
    Current conversation method get_remote implementation only
    return one value. But sometime we want get all remote key/value
    for conversation scope GLOBAL and SERVICE
    conv is the conversation to work with .
    This need to be called in a relation hook handler
    """
    values = {}
    cur_rid = hookenv.relation_id()
    departing = hookenv.hook_name().endswith('-relation-departed')
    for relation_id in conv.relation_ids:
        units = hookenv.related_units(relation_id)
        if departing and cur_rid == relation_id:
            # Work around the fact that Juju 2.0 doesn't include the
            # departing unit in relation-list during the -departed hook,
            # by adding it back in ourselves.
            units.append(hookenv.remote_unit())
        for unit in units:
            # Skip units that are no longer part of this conversation.
            if unit not in conv.units:
                continue
            value = hookenv.relation_get(key, unit, relation_id)
            if value:
                values[unit] = value
    return values
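Inside a relation hook handler it yields one value per remote unit; an illustrative use (the conv conversation object and hostname key are assumptions):

for unit, host in get_all_remote(conv, 'hostname').items():
    hookenv.log('{} advertises hostname {}'.format(unit, host))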
Example #19
    def write(self):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}

        # check that the charm can write to the conf dir.  If not, then nagios
        # probably isn't installed, and we can defer.
        if not self.does_nrpe_conf_dir_exist():
            return

        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }
            # If we were passed max_check_attempts, add that to the relation data
            try:
                nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts
            except AttributeError:
                pass

        # update-status hooks are configured to fire every 5 minutes by
        # default. When nagios-nrpe-server is restarted, the nagios server
        # reports checks as failing, causing unnecessary alerts. Let's not
        # restart on update-status hooks.
        if hook_name() != 'update-status':
            service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            reldata = relation_get(unit=local_unit(), rid=rid)
            if 'monitors' in reldata:
                # update the existing set of monitors with the new data
                old_monitors = yaml.safe_load(reldata['monitors'])
                old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
                # remove keys that are in the remove_check_queue
                old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
                                     if k not in self.remove_check_queue}
                # update/add nrpe_monitors
                old_nrpe_monitors.update(nrpe_monitors)
                old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
                # write back to the relation
                relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
            else:
                # write a brand new set of monitors, as no existing ones.
                relation_set(relation_id=rid, monitors=yaml.dump(monitors))

        self.remove_check_queue.clear()
Example #20
    def _manage_flags(self):
        """
        Manage automatic relation flags.

        Internal use only.
        """
        already_joined = is_flag_set(self.expand_name('joined'))
        hook_name = hookenv.hook_name()
        rel_hook = hook_name.startswith(self.endpoint_name + '-relation-')
        departed_hook = rel_hook and hook_name.endswith('-departed')

        toggle_flag(self.expand_name('joined'), self.is_joined)

        if departed_hook:
            set_flag(self.expand_name('departed'))
        elif self.is_joined:
            clear_flag(self.expand_name('departed'))

        if already_joined and not rel_hook:
            # skip checking relation data outside hooks for this relation
            # to save on API calls to the controller (unless we didn't have
            # the joined flag before, since then we might be migrating to
            # Endpoints)
            return

        for unit in self.all_units:
            for key, value in unit.received.items():
                data_key = 'endpoint.{}.{}.{}.{}'.format(
                    self.endpoint_name, unit.relation.relation_id,
                    unit.unit_name, key)
                if data_changed(data_key, value):
                    set_flag(self.expand_name('changed'))
                    set_flag(self.expand_name('changed.{}'.format(key)))
        self.manage_flags()
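Reactive charm code then consumes the flags this method maintains; a minimal hypothetical handler for an endpoint named db:

from charms.reactive import when, clear_flag


@when('endpoint.db.changed')
def handle_db_changed():
    # Re-render configuration from the new relation data, then ack.
    clear_flag('endpoint.db.changed')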
Example #21
def main(relation_name=None):
    """
    This is the main entry point for the reactive framework.  It calls
    :func:`~bus.discover` to find and load all reactive handlers (e.g.,
    :func:`@when <decorators.when>` decorated blocks), and then
    :func:`~bus.dispatch` to trigger hook and state handlers until the
    state settles out.  Finally,
    :meth:`unitdata.kv().flush <charmhelpers.core.unitdata.Storage.flush>`
    is called to persist the state.

    :param str relation_name: Optional name of the relation which is being handled.
    """
    hookenv.log('Reactive main running for hook %s' % hookenv.hook_name(), level=hookenv.INFO)

    def flush_kv():
        if unitdata._KV:
            unitdata._KV.flush()
    hookenv.atexit(flush_kv)
    try:
        bus.discover()
        bus.dispatch()
    except SystemExit as x:
        if x.code is None or x.code == 0:
            hookenv._run_atexit()
        raise
    hookenv._run_atexit()
Example #22
def set_cp_agent():
    juju_log('Setting cp-agentd configuration for {} hook'.format(hook_name()))
    mport = 0
    for rid in relation_ids('cplane-controller'):
        for unit in related_units(rid):
            mport = relation_get(attribute='mport', unit=unit, rid=rid)
            uport = relation_get(attribute='uport', unit=unit, rid=rid)
            unicast_mode = config('enable-unicast')
            cplane_controller = relation_get(attribute='private-address',
                                             unit=unit, rid=rid)
            if mport:
                key = 'mcast-port=' + mport
                cmd = ['cp-agentd', 'set-config', key]
                subprocess.check_call(cmd)
                key = 'mgmt-iface=' + config('mgmt-int')
                cmd = ['cp-agentd', 'set-config', key]
                subprocess.check_call(cmd)
                if unicast_mode is True:
                    key = 'ucast-ip=' + cplane_controller
                    cmd = ['cp-agentd', 'set-config', key]
                    subprocess.check_call(cmd)
                else:
                    cmd = "sed -i '/ucast-ip/d' /etc/cplane/cp-config.json"
                    os.system(cmd)
                key = 'ucast-port=' + uport
                cmd = ['cp-agentd', 'set-config', key]
                subprocess.check_call(cmd)
                key = 'log-level=' + str(config('cp-agent-log-level'))
                with open('/etc/cplane/cp-config.json', 'r') as file:
                    filedata = file.read()
                if '"{}"'.format(config('cp-agent-log-level')) not in filedata:
                    cmd = ['cp-agentd', 'set-config', key]
                    subprocess.check_call(cmd)
                key = 'vm-mtu=' + str(config('cp-vm-mtu'))
                cmd = ['cp-agentd', 'set-config', key]
                subprocess.check_call(cmd)

                return

    key = 'mcast-port=' + str(config('cp-controller-mport'))
    cmd = ['cp-agentd', 'set-config', key]
    subprocess.check_call(cmd)
    key = 'mgmt-iface=' + config('mgmt-int')
    cmd = ['cp-agentd', 'set-config', key]
    subprocess.check_call(cmd)
    key = 'ucast-ip=' + config('cplane-controller-ip')
    cmd = ['cp-agentd', 'set-config', key]
    subprocess.check_call(cmd)
    key = 'ucast-port=' + str(config('cp-controller-uport'))
    cmd = ['cp-agentd', 'set-config', key]
    subprocess.check_call(cmd)
    key = 'log-level=' + str(config('cp-agent-log-level'))
    with open('/etc/cplane/cp-config.json', 'r') as file:
        filedata = file.read()
    if '"{}"'.format(config('cp-agent-log-level')) not in filedata:
        cmd = ['cp-agentd', 'set-config', key]
        subprocess.check_call(cmd)
    key = 'vm-mtu=' + str(config('cp-vm-mtu'))
    cmd = ['cp-agentd', 'set-config', key]
    subprocess.check_call(cmd)
Example #23
    def provide_data(self):
        hook_name = hookenv.hook_name()
        for service in self.services.values():
            for provider in service.get("provided_data", []):
                if re.match(r"{}-relation-(joined|changed)".format(provider.name), hook_name):
                    data = provider.provide_data()
                    if provider._is_ready(data):
                        hookenv.relation_set(None, data)
Example #24
def upnp_interval_changed():
    """Reconfigure the UPNP referesh interval when configuration changes."""
    if hookenv.hook_name() == "install":
        return
    ph.remove_upnp_cron()

    if ph.charm_config["enable-upnp"]:
        ph.add_upnp_cron()
Example #25
def cert_interval_changed():
    """Reconfigure the certificate renewal interval when configuration changes."""
    if hookenv.hook_name() == "install":
        return
    ph.remove_cert_cron()

    if ph.charm_config["enable-letsencrypt"]:
        ph.add_cert_cron()
Example #26
def letsencrypt_config_changed():
    """Configure certbot when configuration changes."""
    if hookenv.hook_name() == "install":
        return
    ph.disable_letsencrypt()

    if ph.charm_config["enable-letsencrypt"]:
        ph.enable_letsencrypt()
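Examples #24 through #26 open with the same install-time guard; the pattern generalizes to a small decorator (a sketch; skip_on_install is a hypothetical name):

import functools

from charmhelpers.core import hookenv


def skip_on_install(f):
    """Make a config-changed style handler a no-op during install."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if hookenv.hook_name() == "install":
            return None
        return f(*args, **kwargs)
    return wrapper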
Example #27
    def __call__(self):
        from charmhelpers.core import hookenv
        hook_name = hookenv.hook_name()

        with self.kv.hook_scope(hook_name):
            self._record_charm_version(hookenv.charm_dir())
            delta_config, delta_relation = self._record_hook(hookenv)
            yield self.kv, delta_config, delta_relation
Example #28
def check_really_is_update_status():
    """Clear the is-update-status-hook if the hook is not assess-status.

    This is in case the previous update-status hook execution died for some
    reason and the flag never got cleared.
    """
    if hook_name() != 'update-status':
        clear_flag('is-update-status-hook')
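The flag it clears is presumably set on entry to each update-status run; a hedged sketch of that counterpart (the flag_update_status name is an assumption):

from charms.reactive import set_flag
from charmhelpers.core.hookenv import hook_name


def flag_update_status():
    # Assumed counterpart: mark that an update-status hook is running.
    if hook_name() == 'update-status':
        set_flag('is-update-status-hook')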
Example #30
def _manage_upgrade_flags():
    hook_name = hookenv.hook_name()

    if hook_name == 'pre-series-upgrade':
        set_flag('upgrade.series.in-progress')

    if hook_name == 'post-series-upgrade':
        clear_flag('upgrade.series.in-progress')
Example #31
    def _startup(cls):
        # update data to be backwards compatible after fix for issue 28
        _migrate_conversations()

        if hookenv.hook_name().endswith('-relation-departed'):
            def depart_conv():
                cls(hookenv.relation_type()).conversation().depart()
            hookenv.atexit(depart_conv)
Example #32
def job_manager(service_name):
    logging.basicConfig(level=logging.DEBUG)
    hook_name = hookenv.hook_name()
    if hook_name in ('install', 'upgrade-charm'):
        manage_install(service_name)
    elif hook_name == 'health':
        report_health(service_name)
    else:
        manage_services(service_name)
Example #33
def services_to_pause_or_resume():
    if "post-series-upgrade" in hook_name():
        return services()
    else:
        # WARNING(lourot): the list ordering is important. See services() for
        # more details.
        return [
            service for service in services() if service != libvirt_daemon()
        ]
Example #34
def redirect_changed():
    """Reconfigure the HTTPS redirect behaviour when configuration changes."""
    if hookenv.hook_name() == "install":
        return

    if ph.charm_config["enable-https-redirect"]:
        ph.enable_redirect()
    else:
        ph.disable_redirect()
Example #35
def main(relation_name=None):
    """
    This is the main entry point for the reactive framework.  It calls
    :func:`~bus.discover` to find and load all reactive handlers (e.g.,
    :func:`@when <decorators.when>` decorated blocks), and then
    :func:`~bus.dispatch` to trigger hook and state handlers until the
    state settles out.  Finally,
    :meth:`unitdata.kv().flush <charmhelpers.core.unitdata.Storage.flush>`
    is called to persist the state.

    :param str relation_name: Optional name of the relation which is being handled.
    """
    hookenv.log('Reactive main running for hook %s' % hookenv.hook_name(),
                level=hookenv.INFO)

    # work-around for https://bugs.launchpad.net/juju-core/+bug/1503039
    # ensure that external handlers can tell what hook they're running in
    if 'JUJU_HOOK_NAME' not in os.environ:
        os.environ['JUJU_HOOK_NAME'] = os.path.basename(sys.argv[0])

    # update data to be backwards compatible after fix for issue 28
    relations._migrate_conversations()

    def flush_kv():
        if unitdata._KV:
            unitdata._KV.flush()

    hookenv.atexit(flush_kv)
    if hookenv.hook_name().endswith('-relation-departed'):

        def depart_conv():
            rel = RelationBase.from_name(hookenv.relation_type())
            rel.conversation().depart()

        hookenv.atexit(depart_conv)
    try:
        bus.discover()
        hookenv._run_atstart()
        bus.dispatch()
    except SystemExit as x:
        if x.code is None or x.code == 0:
            hookenv._run_atexit()
        raise
    hookenv._run_atexit()
Example #36
def default_hook():
    if not hookenv.has_juju_version('1.24'):
        hookenv.status_set('blocked', 'Requires Juju 1.24 or higher')
        # Error state, since we don't have 1.24 to give a nice blocked state.
        raise SystemExit(1)

    # These need to be imported after bootstrap() or required Python
    # packages may not have been installed.
    import definitions

    # Only useful for debugging, or perhaps have this enabled with a config
    # option?
    ## from loglog import loglog
    ## loglog('/var/log/cassandra/system.log', prefix='C*: ')

    hookenv.log('*** {} Hook Start'.format(hookenv.hook_name()))
    sm = definitions.get_service_manager()
    sm.manage()
    hookenv.log('*** {} Hook Done'.format(hookenv.hook_name()))
Example #37
    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hook_name = hookenv.hook_name()
        if hook_name == "stop":
            self.stop_services()
        else:
            self.provide_data()
            self.reconfigure_services()
Example #38
    def _manage_departed(self):
        hook_name = hookenv.hook_name()
        rel_hook = hook_name.startswith(self.endpoint_name + '-relation-')
        departed_hook = rel_hook and hook_name.endswith('-departed')
        if not departed_hook:
            return
        relation = self.relations[hookenv.relation_id()]
        unit = RelatedUnit(relation, hookenv.remote_unit())
        self.all_departed_units.append(unit)
        if not relation.joined_units:
            del self.relations[relation.relation_id]
Example #39
def stats_changed():
    """Reconfigure the stats endpoint when configuration changes."""
    if hookenv.hook_name() == "install":
        return

    if ph.charm_config["enable-stats"]:
        hookenv.log("Enabling stats for config change")
        ph.enable_stats()
    else:
        hookenv.log("Disabling stats for config change")
        ph.disable_stats()
Example #40
def restart_services():
    cmd = ['service', 'openvswitch-switch', 'restart']
    subprocess.check_call(cmd)
    juju_log('Restarting cp-agentd service for {} hook'.format(hook_name()))
    cmd = ['service', 'cp-agentd', 'stop']
    subprocess.check_call(cmd)
    cmd = ['service', 'cp-agentd', 'start']
    subprocess.check_call(cmd)

    cmd = ['update-rc.d', 'cp-agentd', 'enable']
    subprocess.check_call(cmd)
Example #42
def main(relation_name=None):
    """
    This is the main entry point for the reactive framework.  It calls
    :func:`~bus.discover` to find and load all reactive handlers (e.g.,
    :func:`@when <decorators.when>` decorated blocks), and then
    :func:`~bus.dispatch` to trigger handlers until the queue settles out.
    Finally, :meth:`unitdata.kv().flush <charmhelpers.core.unitdata.Storage.flush>`
    is called to persist the flags and other data.

    :param str relation_name: Optional name of the relation which is being handled.
    """
    restricted_mode = hookenv.hook_name() in [
        'meter-status-changed', 'collect-metrics'
    ]

    hookenv.log('Reactive main running for hook %s' % hookenv.hook_name(),
                level=hookenv.INFO)
    if restricted_mode:
        hookenv.log('Restricted mode.', level=hookenv.INFO)

    # work-around for https://bugs.launchpad.net/juju-core/+bug/1503039
    # ensure that external handlers can tell what hook they're running in
    if 'JUJU_HOOK_NAME' not in os.environ:
        os.environ['JUJU_HOOK_NAME'] = os.path.basename(sys.argv[0])

    try:
        bus.discover()
        if not restricted_mode:  # limit what gets run in restricted mode
            hookenv._run_atstart()
        bus.dispatch(restricted=restricted_mode)
    except Exception:
        tb = traceback.format_exc()
        hookenv.log('Hook error:\n{}'.format(tb), level=hookenv.ERROR)
        raise
    except SystemExit as x:
        if x.code not in (None, 0):
            raise

    if not restricted_mode:  # limit what gets run in restricted mode
        hookenv._run_atexit()
    unitdata._KV.flush()
Example #43
def deploy(s):
    config = hookenv.config()
    version = config.get('cf_version')
    if not version or version == 'latest':
        version = RELEASES[0]['releases'][1]
    generator = CharmGenerator(RELEASES, SERVICES)
    generator.select_release(version)
    charm_dir = hookenv.charm_dir()
    build_dir = os.path.join(charm_dir, 'build', str(version))
    with open(os.path.join(build_dir, 'bundles.yaml')) as fp:
        bundle = yaml.load(fp)
    options = setup_parser().parse_args(['--series', 'trusty',
                                         '--local-mods',
                                         '--retry', '3'])
    creds = JujuAPICredentials()
    env = APIEnvironment(creds['api_address'], creds['api_password'])
    deployment = JujuLoggingDeployment(
        name='cloudfoundry',
        data=bundle['cloudfoundry'],
        include_dirs=[],
        repo_path=build_dir)
    importer = Importer(env, deployment, options)
    env.connect()
    juju_home = os.environ['JUJU_HOME'] = os.path.join(charm_dir, '.juju')
    if not os.path.exists(juju_home):
        os.mkdir(juju_home)
    try:
        try:
            importer.run()
        except Exception as e:
            hook_name = hookenv.hook_name()
            if hook_name.startswith('orchestrator-relation-'):
                hookenv.log('Error adding orchestrator relation: {}'.format(str(e)), hookenv.ERROR)
            else:
                raise
        # manually add the implicit relation between the orchestrator and
        # the generated charms; this can't be done in the bundle because
        # the orchestrator is not defined in the bundle
        orchestrator = hookenv.service_name()
        for service_name, service_data in bundle['cloudfoundry']['services'].items():
            # XXX: explicitly check if service has orchestrator interface
            if not service_data['charm'].startswith('cs:'):
                try:
                    env.add_relation(orchestrator, service_name)
                except EnvError as e:
                    if e.message.endswith('relation already exists'):
                        continue  # existing relations are ok, just skip
                    else:
                        hookenv.log('Error adding orchestrator relation: {}'.format(str(e)), hookenv.ERROR)
        env.expose('haproxy')
    finally:
        env.close()
Example #44
    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hook_name = hookenv.hook_name()
        if hook_name == 'stop':
            self.stop_services()
        else:
            self.provide_data()
            self.reconfigure_services()
        cfg = hookenv.config()
        if cfg.implicit_save:
            cfg.save()
Example #46
def install_packages(servicename):
    if hook_name() == "install":
        # Install drivers
        apt_install(['git'], fatal=True)
        check_call("sudo tar xvf files/networking-onos.tar", shell=True)
        check_call("cd networking-onos;sudo ./install_driver.sh;cd ..", shell=True)
        check_call("sudo tar xvf files/networking-sfc.tar", shell=True)
        check_call("cd networking-sfc;sudo ./install_driver.sh;cd ..", shell=True)
        # Install neutron
        pkgs = ['neutron-common', 'neutron-plugin-ml2']
        pkgs = filter_installed_packages(pkgs)
        apt_install(pkgs, fatal=True)
        # Update neutron table
        update_sfc()
Example #47
    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hook_name = hookenv.hook_name()
        try:
            if hook_name == 'stop':
                self.stop_services()
            else:
                self.update_status_working()
                self.provide_data()
                self.reconfigure_services()
                self.update_status_done()
        except Exception as e:
            hookenv.juju_status('error', message=str(e))
            raise
Example #48
def preinstall():
    '''Preinstallation data_ready hook.'''
    # Only run the preinstall hooks from the actual install hook.
    if hookenv.hook_name() == 'install':
        # Pre-exec
        pattern = os.path.join(hookenv.charm_dir(),
                               'exec.d', '*', 'charm-pre-install')
        preinstall_hooks = sorted(glob.glob(pattern))
        if not preinstall_hooks:
            hookenv.log('No preinstall hooks found')
        for f in preinstall_hooks:
            if os.path.isfile(f) and os.access(f, os.X_OK):
                hookenv.log('Running preinstall hook {}'.format(f))
                subprocess.check_call(['sh', '-c', f])
            else:
                hookenv.log('Ignoring preinstall hook {}'.format(f),
                            WARNING)
Example #49
    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hookenv._run_atstart()
        try:
            hook_name = hookenv.hook_name()
            if hook_name == 'stop':
                self.stop_services()
            else:
                self.reconfigure_services()
                self.provide_data()
        except SystemExit as x:
            if x.code is None or x.code == 0:
                hookenv._run_atexit()
        hookenv._run_atexit()
Example #50
    def provide_data(self):
        """
        Set the relation data for each provider in the ``provided_data`` list.

        A provider must have a `name` attribute, which indicates which relation
        to set data on, and a `provide_data()` method, which returns a dict of
        data to set.
        """
        hook_name = hookenv.hook_name()
        for service in self.services.values():
            for provider in service.get('provided_data', []):
                if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
                    data = provider.provide_data()
                    _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
                    if _ready:
                        hookenv.relation_set(None, data)
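A provider therefore only needs a name attribute, a provide_data() method, and optionally _is_ready(); a minimal illustrative provider (the website relation and its data keys are assumptions):

class WebsiteRelation(object):
    name = 'website'

    def provide_data(self):
        # Data this unit advertises on the website relation.
        return {'hostname': hookenv.unit_private_ip(), 'port': 80}

    def _is_ready(self, data):
        # Only publish once every field has a value.
        return all(data.values())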
Example #51
    def _changed(self):
        # Set the master/standby changed flags. The charm is
        # responsible for clearing this, if it cares. Flags are
        # cleared before being set to ensure triggers are triggered.
        upgrade = hookenv.hook_name() == 'upgrade-charm'
        self._reset_all_flags()
        key = self.expand_name('endpoint.{endpoint_name}.master.changed')
        if data_changed(key, [str(cs.master) for cs in self]) or (self.master and upgrade):
            self._clear_flag('{endpoint_name}.master.changed')
            self._set_flag('{endpoint_name}.master.changed')
            self._clear_flag('{endpoint_name}.database.changed')
            self._set_flag('{endpoint_name}.database.changed')
        key = self.expand_name('endpoint.{endpoint_name}.standbys.changed')
        if data_changed(key, [sorted(str(s) for s in cs.standbys) for cs in self]) or (self.standbys and upgrade):
            self._clear_flag('{endpoint_name}.standbys.changed')
            self._set_flag('{endpoint_name}.standbys.changed')
            self._clear_flag('{endpoint_name}.database.changed')
            self._set_flag('{endpoint_name}.database.changed')
        self._clear_flag('endpoint.{endpoint_name}.changed')
Example #52
def register_configs():
    """
    Register config files with their respective contexts.
    Registration of some configs may not be required depending on
    the existence of certain relations.
    """
    # if called without anything installed (eg during install hook)
    # just default to earliest supported release. configs don't get
    # touched till post-install, anyway.
    release = get_os_codename_package('cinder-common', fatal=False) or 'folsom'
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    confs = []

    if relation_ids('ceph') and hook_name() != 'ceph-relation-broken':
        # Add charm ceph configuration to resources and
        # ensure directory actually exists
        mkdir(os.path.dirname(ceph_config_file()))
        mkdir(os.path.dirname(CEPH_CONF))
        # Install ceph config as an alternative for co-location with
        # ceph and ceph-osd charms - nova-compute ceph.conf will be
        # lower priority than both of these, but that's OK
        if not os.path.exists(ceph_config_file()):
            # touch file for pre-templated generation
            open(ceph_config_file(), 'wt').close()
        install_alternative(os.path.basename(CEPH_CONF),
                            CEPH_CONF, ceph_config_file())
        CONFIG_FILES[ceph_config_file()] = {
            'hook_contexts': [context.CephContext()],
            'services': ['cinder-volume'],
        }
        confs.append(ceph_config_file())

    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

    return configs
Example #53
def resource_map(release=None):
    """
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    """
    resource_map = deepcopy(BASE_RESOURCE_MAP)
    release = release or os_release('cinder-common', base='icehouse')
    if relation_ids('backup-backend'):
        resource_map[CINDER_CONF]['services'].append('cinder-backup')
        resource_map[ceph_config_file()]['services'].append('cinder-backup')

    if relation_ids('ceph') and hook_name() != 'ceph-relation-broken':
        # need to create this early, new peers will have a relation during
        # registration before they've run the ceph hooks to create the
        # directory.
        # !!! FIX: These side effects seem inappropriate for this method
        mkdir(os.path.dirname(CEPH_CONF))
        mkdir(os.path.dirname(ceph_config_file()))

        # Install ceph config as an alternative for co-location with
        # ceph and ceph-osd charm - cinder ceph.conf will be
        # lower priority than both of these, but that's OK
        if not os.path.exists(ceph_config_file()):
            # touch file for pre-templated generation
            open(ceph_config_file(), 'w').close()
        install_alternative(os.path.basename(CEPH_CONF),
                            CEPH_CONF, ceph_config_file())
    else:
        resource_map.pop(ceph_config_file())

    if os.path.exists('/etc/apache2/conf-available'):
        resource_map.pop(APACHE_SITE_CONF)
    else:
        resource_map.pop(APACHE_SITE_24_CONF)

    # Remove services from map which are not enabled by user config
    for cfg in resource_map.keys():
        resource_map[cfg]['services'] = \
            filter_services(resource_map[cfg]['services'])

    if enable_memcache(source=config()['openstack-origin']):
        resource_map[MEMCACHED_CONF] = {
            'contexts': [context.MemcacheContext()],
            'services': ['memcached']}

    if run_in_apache():
        for cfile in resource_map:
            svcs = resource_map[cfile]['services']
            if 'cinder-api' in svcs:
                svcs.remove('cinder-api')
                if 'apache2' not in svcs:
                    svcs.append('apache2')
        wsgi_script = "/usr/bin/cinder-wsgi"
        resource_map[WSGI_CINDER_API_CONF] = {
            'contexts': [context.WSGIWorkerConfigContext(name="cinder",
                                                         script=wsgi_script),
                         cinder_contexts.HAProxyContext()],
            'services': ['apache2']
        }

    if release and CompareOpenStackReleases(release) < 'queens':
        resource_map.pop(CINDER_POLICY_JSON)

    return resource_map
Example #54
def set_os_workload_status(configs, required_interfaces, charm_func=None):
    """
    Set the workload status based on complete contexts.

    Calls status-set for missing or incomplete contexts, and juju-log
    for the details of missing required data.  charm_func is a
    charm-specific function to run, checking for charm-specific
    requirements such as a VIP setting.
    """
    incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
    state = 'active'
    missing_relations = []
    incomplete_relations = []
    message = None
    charm_state = None
    charm_message = None

    for generic_interface in incomplete_rel_data.keys():
        related_interface = None
        missing_data = {}
        # Related or not?
        for interface in incomplete_rel_data[generic_interface]:
            if incomplete_rel_data[generic_interface][interface].get('related'):
                related_interface = interface
                missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
        # No relation ID for the generic_interface
        if not related_interface:
            juju_log("{} relation is missing and must be related for "
                     "functionality. ".format(generic_interface), 'WARN')
            state = 'blocked'
            if generic_interface not in missing_relations:
                missing_relations.append(generic_interface)
        else:
            # Relation ID exists but no related unit
            if not missing_data:
                # Edge case relation ID exists but departing
                if ('departed' in hook_name() or 'broken' in hook_name()) \
                        and related_interface in hook_name():
                    state = 'blocked'
                    if generic_interface not in missing_relations:
                        missing_relations.append(generic_interface)
                    juju_log("{} relation's interface, {}, "
                             "relationship is departed or broken "
                             "and is required for functionality."
                             "".format(generic_interface, related_interface), "WARN")
                # Normal case relation ID exists but no related unit
                # (joining)
                else:
                    juju_log("{} relations's interface, {}, is related but has "
                             "no units in the relation."
                             "".format(generic_interface, related_interface), "INFO")
            # Related unit exists and data missing on the relation
            else:
                juju_log("{} relation's interface, {}, is related awaiting "
                         "the following data from the relationship: {}. "
                         "".format(generic_interface, related_interface,
                                   ", ".join(missing_data)), "INFO")
            if state != 'blocked':
                state = 'waiting'
            if generic_interface not in incomplete_relations \
                    and generic_interface not in missing_relations:
                incomplete_relations.append(generic_interface)

    if missing_relations:
        message = "Missing relations: {}".format(", ".join(missing_relations))
        if incomplete_relations:
            message += "; incomplete relations: {}" \
                       "".format(", ".join(incomplete_relations))
        state = 'blocked'
    elif incomplete_relations:
        message = "Incomplete relations: {}" \
                  "".format(", ".join(incomplete_relations))
        state = 'waiting'

    # Run charm specific checks
    if charm_func:
        charm_state, charm_message = charm_func(configs)
        if charm_state != 'active' and charm_state != 'unknown':
            state = workload_state_compare(state, charm_state)
            if message:
                charm_message = charm_message.replace("Incomplete relations: ",
                                                      "")
                message = "{}, {}".format(message, charm_message)
            else:
                message = charm_message

    # Set to active if all requirements have been met
    if state == 'active':
        message = "Unit is ready"
        juju_log(message, "INFO")

    status_set(state, message)
Example #55
def restart_cp_agentd():
    juju_log('Restarting cp-agentd service for {} hook'.format(hook_name()))
    cmd = ['service', 'cp-agentd', 'restart']
    subprocess.check_call(cmd)
Example #56
            'ports': [80, 28015, 29015],
            'provided_data': [WebsiteRelation()],
            'required_data': [
                docker.DockerPortMappings({
                    80: 8080,
                    28015: 28015,
                    29015: 29015,
                }),
                docker.DockerVolumes(mapped_volumes={config['storage-path']: '/rethinkdb'}),
                docker.DockerContainerArgs(
                    'rethinkdb',
                    '--bind', 'all',
                    '--canonical-address', hookenv.unit_get('public-address'),
                    '--canonical-address', hookenv.unit_get('private-address'),
                    '--machine-name', socket.gethostname().replace('-', '_'),
                ),
                ClusterPeers(),
            ],
            'start': docker.docker_start,
            'stop': docker.docker_stop,
        },
    ])
    manager.manage()


if __name__ == '__main__':
    if hookenv.hook_name() == 'install':
        install()
    else:
        manage()
Example #57
def wrapper(*args, **kw):
    hookenv.log("* Helper {}/{}".format(hookenv.hook_name(),
                                        func.__name__))
    return func(*args, **kw)