Example #1
    def dumpster_fire(cls, *_a, **_kw):
        """ dump the status.json file to cachedir

            Location and filename can be adjusted with the cachedir and
            hubble:status:dumpster options (see above).
        """
        try:
            if __salt__['config.get']('splunklogging', False):
                # lazy load to avoid circular import
                import hubblestack.log
                hubblestack.log.emit_to_splunk('Signal {0} detected'.format(signal.SIGUSR1),
                                               'INFO',
                                               'hubblestack.signals')
        finally:
            dumpster = get_hubble_status_opt('dumpster') or 'status.json'
            if not dumpster.startswith('/'):
                cachedir = get_hubble_or_salt_opt('cachedir') or '/tmp'
                dumpster = os.path.join(cachedir, dumpster)
            try:
                with open(dumpster, 'w') as status_file:
                    status_file.write(cls.as_json())
                    status_file.write('\n')
                log.info("wrote HubbleStatus to %s", dumpster)
            except Exception:
                log.exception("ignoring exception during dumpster fire")
Example #2
def emit_to_syslog(grains_to_emit):
    '''
    Emit grains and their values to syslog
    '''
    try:
        # Avoid syslog lines longer than 1024 characters
        # Build syslog message
        syslog_list = []
        syslog_list.append('hubble_syslog_message:')
        for grain in grains_to_emit:
            if grain in __grains__:
                if bool(__grains__[grain]) and isinstance(
                        __grains__[grain], dict):
                    for key, value in __grains__[grain].items():
                        syslog_list.append('{0}={1}'.format(key, value))
                else:
                    syslog_list.append('{0}={1}'.format(
                        grain, __grains__[grain]))
        syslog_message = ' '.join(syslog_list)
        log.info('Emitting some grains to syslog')
        syslog.openlog(logoption=syslog.LOG_PID)
        syslog.syslog(syslog_message)
    except Exception as exc:
        log.exception(
            'An exception occurred while emitting a message to syslog: %s', exc)
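The interesting part of the function is how dict-valued grains are flattened into key=value pairs one level deep, while scalar grains are emitted as grain=value. That logic, isolated as a pure function over a hypothetical grains dict:

def build_syslog_message(grains, grains_to_emit):
    """Flatten selected grains into a single 'k=v k=v ...' line,
    expanding dict-valued grains one level deep (as the snippet does)."""
    parts = ['hubble_syslog_message:']
    for grain in grains_to_emit:
        if grain not in grains:
            continue
        value = grains[grain]
        if value and isinstance(value, dict):
            parts.extend('{0}={1}'.format(k, v) for k, v in value.items())
        else:
            parts.append('{0}={1}'.format(grain, value))
    return ' '.join(parts)

# hypothetical grain values
sample = {'fqdn': 'host1.example.com', 'cloud_details': {'provider': 'aws'}}
print(build_syslog_message(sample, ['fqdn', 'cloud_details', 'missing']))
# -> hubble_syslog_message: fqdn=host1.example.com provider=aws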
Example #3
def _clear_gitfs_locks():
    """Clear old locks and log the changes"""
    # Clear old locks
    if "gitfs" in __opts__["fileserver_backend"] or "git" in __opts__[
            "fileserver_backend"]:
        git_objects = [
            hubblestack.utils.gitfs.GitFS(
                __opts__,
                __opts__["gitfs_remotes"],
                per_remote_overrides=hubblestack.fileserver.gitfs.PER_REMOTE_OVERRIDES,
                per_remote_only=hubblestack.fileserver.gitfs.PER_REMOTE_ONLY,
            )
        ]
        ]
        ret = {}
        for obj in git_objects:
            lock_type = "update"
            cleared, errors = hubblestack.fileserver.clear_lock(
                obj.clear_lock, "gitfs", remote=None, lock_type=lock_type)
            if cleared:
                ret.setdefault("cleared", []).extend(cleared)
            if errors:
                ret.setdefault("errors", []).extend(errors)
        if ret:
            log.info("One or more gitfs locks were removed: %s", ret)
Example #4
def main():
    """
    Run the main hubble loop
    """
    # Initial fileclient setup
    _clear_gitfs_locks()
    # Setup fileclient
    log.info('Setting up the fileclient/fileserver')
    retry_count = __opts__.get('fileserver_retry_count_on_startup', None)
    retry_time = __opts__.get('fileserver_retry_rate_on_startup', 30)
    count = 0
    while True:
        try:
            file_client = hubblestack.fileclient.get_file_client(__opts__)
            file_client.channel.fs.update()
            last_fc_update = time.time()
            break
        except Exception:
            if (retry_count is None
                    or count < retry_count) and not __opts__['function']:
                log.exception(
                    'Exception thrown trying to setup fileclient. '
                    'Trying again in %s seconds.', retry_time)
                count += 1
                time.sleep(retry_time)
                continue
            else:
                log.exception(
                    'Exception thrown trying to setup fileclient. Exiting.')
                sys.exit(1)
    # Check for single function run
    if __opts__['function']:
        run_function()
        sys.exit(0)
    last_grains_refresh = time.time() - __opts__['grains_refresh_frequency']
    log.info('Starting main loop')
    pidfile_count = 0
    # pidfile_refresh in seconds, our scheduler deals in half-seconds
    pidfile_refresh = int(__opts__.get('pidfile_refresh', 60)) * 2
    while True:
        # Check if fileserver needs update
        if time.time() - last_fc_update >= __opts__['fileserver_update_frequency']:
            last_fc_update = _update_fileserver(file_client)
        pidfile_count += 1
        if __opts__['daemonize'] and pidfile_count > pidfile_refresh:
            pidfile_count = 0
            create_pidfile()
        if time.time() - last_grains_refresh >= __opts__['grains_refresh_frequency']:
            last_grains_refresh = _emit_and_refresh_grains()
        try:
            log.debug('Executing schedule')
            sf_count = schedule()
        except Exception as exc:
            log.exception('Error executing schedule: %s', exc)
            if isinstance(exc, KeyboardInterrupt):
                raise exc
        time.sleep(__opts__.get('scheduler_sleep_frequency', 0.5))
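Each periodic task in the loop follows the same stamp-and-compare idiom: record when the task last ran and fire again once the configured frequency has elapsed. A minimal standalone sketch of that idiom (task names and frequencies are hypothetical):

import time

frequencies = {'fileserver': 2.0, 'grains': 5.0}   # hypothetical, in seconds
last_run = {name: 0.0 for name in frequencies}

for _ in range(8):                                 # stand-in for `while True`
    now = time.time()
    for name, freq in frequencies.items():
        if now - last_run[name] >= freq:
            print('running task:', name)
            last_run[name] = time.time()
    time.sleep(0.5)                                # the scheduler_sleep_frequency default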
Example #5
    def start_sigusr1_signal_handler(cls):
        """ start the signal.SIGUSR1 handler (dumps status to
            /var/cache/hubble/status.json or whatever is specified in
            cachedir + hubble:status:dumpster configs)
        """
        if not cls._signaled:
            cls._signaled = True
            if not hasattr(signal, 'SIGUSR1'):
                # TODO: invent invocation that works in windows instead of just complaining
                log.info("signal package lacks SIGUSR1, skipping SIGUSR1 status.json handler setup")
                return
            signal.signal(signal.SIGUSR1, cls.dumpster_fire)
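Once registered, the handler can be exercised from outside with `kill -USR1 <pid>`, or from inside the process for testing. A stdlib-only sketch of registering and self-triggering a SIGUSR1 handler (the handler body is a stand-in for dumpster_fire):

import os
import signal

def fake_dump(signum, frame):
    # stand-in for the dumpster_fire handler above
    print('caught signal', signum, '- would write status.json here')

if hasattr(signal, 'SIGUSR1'):            # absent on Windows, as the snippet notes
    signal.signal(signal.SIGUSR1, fake_dump)
    os.kill(os.getpid(), signal.SIGUSR1)  # same effect as `kill -USR1 <pid>`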
Example #6
def run_function():
    '''
    Run a single function requested by the user
    '''
    # Parse the args
    args = []
    kwargs = {}
    for arg in __opts__['args']:
        if '=' in arg:
            kwarg, _, value = arg.partition('=')
            kwargs[kwarg] = value
        else:
            args.append(arg)

    log.debug('Parsed args: {0} | Parsed kwargs: {1}'.format(args, kwargs))
    log.info('Executing user-requested function {0}'.format(
        __opts__['function']))

    try:
        ret = __salt__[__opts__['function']](*args, **kwargs)
    except KeyError:
        log.error('Function {0} is not available, or not valid.'.format(
            __opts__['function']))
        sys.exit(1)

    if __opts__['return']:
        returner = '{0}.returner'.format(__opts__['return'])
        if returner not in __returners__:
            log.error('Could not find {0} returner.'.format(returner))
        else:
            log.info('Returning job data to {0}'.format(returner))
            returner_ret = {
                'id': __grains__['id'],
                'jid': salt.utils.jid.gen_jid(__opts__),
                'fun': __opts__['function'],
                'fun_args': args + ([kwargs] if kwargs else []),
                'return': ret,
                'retry': False
            }
            if __opts__.get('returner_retry', False):
                returner_ret['retry'] = True
            __returners__[returner](returner_ret)

    # TODO instantiate the salt outputter system?
    if __opts__['json_print']:
        print(json.dumps(ret))
    else:
        if not __opts__['no_pprint']:
            pprint.pprint(ret)
        else:
            print(ret)
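The arg parsing hinges on str.partition, which splits on the first '=' only, so values may themselves contain '='. A quick illustration with hypothetical CLI args:

args, kwargs = [], {}
for arg in ['host1', 'timeout=30', 'query=a=b']:   # hypothetical __opts__['args']
    if '=' in arg:
        key, _, value = arg.partition('=')
        kwargs[key] = value
    else:
        args.append(arg)
print(args)    # -> ['host1']
print(kwargs)  # -> {'timeout': '30', 'query': 'a=b'}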
Example #7
def _emit_and_refresh_grains():
    """ When the grains refresh frequency has expired, refresh grains and emit to syslog """
    log.info('Refreshing grains')
    refresh_grains()
    last_grains_refresh = time.time()
    # Emit syslog at grains refresh frequency
    if not (salt.utils.platform.is_windows()) and \
            __opts__.get('emit_grains_to_syslog', True):
        default_grains_to_emit = ['system_uuid', 'hubble_uuid', 'session_uuid',
                                  'machine_id', 'splunkindex', 'cloud_details',
                                  'hubble_version', 'localhost', 'fqdn']
        grains_to_emit = []
        grains_to_emit.extend(
            __opts__.get('emit_grains_to_syslog_list', default_grains_to_emit))
        emit_to_syslog(grains_to_emit)
    return last_grains_refresh
Example #8
def run_function():
    """
    Run a single function requested by the user
    """
    # Parse the args
    args = []
    kwargs = {}
    for arg in __opts__['args']:
        if '=' in arg:
            kwarg, _, value = arg.partition('=')
            kwargs[kwarg] = value
        else:
            args.append(arg)
    log.debug('Parsed args: %s | Parsed kwargs: %s', args, kwargs)
    log.info('Executing user-requested function %s', __opts__['function'])

    mod_fun = __mods__.get(__opts__['function'])
    if not mod_fun or not callable(mod_fun):
        log.error('Function %s is not available, or not valid.',
                  __opts__['function'])
        sys.exit(1)
    ret = mod_fun(*args, **kwargs)
    if __opts__['return']:
        returner = '{0}.returner'.format(__opts__['return'])
        if returner not in __returners__:
            log.error('Could not find %s returner.', returner)
        else:
            log.info('Returning job data to %s', returner)
            returner_ret = {
                'id': __grains__['id'],
                'jid': hubblestack.utils.jid.gen_jid(__opts__),
                'fun': __opts__['function'],
                'fun_args': args + ([kwargs] if kwargs else []),
                'return': ret
            }
            __returners__[returner](returner_ret)
    # TODO instantiate the salt outputter system?
    if __opts__['json_print']:
        print(json.dumps(ret))
    else:
        if not __opts__['no_pprint']:
            pprint.pprint(ret)
        else:
            print(ret)
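The tail of the function picks one of three output styles: JSON, pretty-printed, or raw repr. The same three-way choice, runnable on a sample result (the option flags and result value are hypothetical):

import json
import pprint

ret = {'host1': {'status': 'ok', 'checks': 12}}    # hypothetical function result
opts = {'json_print': False, 'no_pprint': False}   # hypothetical __opts__ subset

if opts['json_print']:
    print(json.dumps(ret))
elif not opts['no_pprint']:
    pprint.pprint(ret)
else:
    print(ret)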
Example #9
def run_function():
    """
    Run a single function requested by the user
    """
    # Parse the args
    args = []
    kwargs = {}
    for arg in __opts__["args"]:
        if "=" in arg:
            kwarg, _, value = arg.partition("=")
            kwargs[kwarg] = value
        else:
            args.append(arg)
    log.debug("Parsed args: %s | Parsed kwargs: %s", args, kwargs)
    log.info("Executing user-requested function %s", __opts__["function"])

    mod_fun = __mods__.get(__opts__["function"])
    if not mod_fun or not callable(mod_fun):
        log.error("Function %s is not available, or not valid.",
                  __opts__["function"])
        sys.exit(1)
    ret = mod_fun(*args, **kwargs)
    if __opts__["return"]:
        returner = "{0}.returner".format(__opts__["return"])
        if returner not in __returners__:
            log.error("Could not find %s returner.", returner)
        else:
            log.info("Returning job data to %s", returner)
            returner_ret = {
                "id": __grains__["id"],
                "jid": hubblestack.utils.jid.gen_jid(__opts__),
                "fun": __opts__["function"],
                "fun_args": args + ([kwargs] if kwargs else []),
                "return": ret,
            }
            __returners__[returner](returner_ret)
    # TODO instantiate the salt outputter system?
    if __opts__["json_print"]:
        print(json.dumps(ret))
    else:
        if not __opts__["no_pprint"]:
            pprint.pprint(ret)
        else:
            print(ret)
Example #10
def _clear_gitfs_locks():
    """ Clear old locks and log the changes """
    # Clear old locks
    if 'gitfs' in __opts__['fileserver_backend'] or 'git' in __opts__['fileserver_backend']:
        git_objects = [
            salt.utils.gitfs.GitFS(
                __opts__,
                __opts__['gitfs_remotes'],
                per_remote_overrides=salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
                per_remote_only=salt.fileserver.gitfs.PER_REMOTE_ONLY)]
        ret = {}
        for obj in git_objects:
            lock_type = 'update'
            cleared, errors = salt.fileserver.clear_lock(obj.clear_lock, 'gitfs', remote=None,
                                                         lock_type=lock_type)
            if cleared:
                ret.setdefault('cleared', []).extend(cleared)
            if errors:
                ret.setdefault('errors', []).extend(errors)
        if ret:
            log.info('One or more gitfs locks were removed: %s', ret)
Example #11
def emit_to_syslog(grains_to_emit):
    """
    Emit grains and their values to syslog
    """
    try:
        # Avoid syslog lines longer than 1024 characters
        # Build syslog message
        syslog_list = ["hubble_syslog_message:"]
        for grain in grains_to_emit:
            if grain in __grains__:
                if bool(__grains__[grain]) and isinstance(
                        __grains__[grain], dict):
                    for key, value in __grains__[grain].items():
                        syslog_list.append("{0}={1}".format(key, value))
                else:
                    syslog_list.append("{0}={1}".format(
                        grain, __grains__[grain]))
        syslog_message = " ".join(syslog_list)
        log.info("Emitting some grains to syslog")
        syslog.openlog(logoption=syslog.LOG_PID)
        syslog.syslog(syslog_message)
    except Exception as exc:
        log.exception(
            "An exception occurred on emitting a message to syslog: %s", exc)
Example #12
def _emit_and_refresh_grains():
    """When the grains refresh frequency has expired, refresh grains and emit to syslog"""
    log.info("Refreshing grains")
    refresh_grains()
    last_grains_refresh = time.time()
    # Emit syslog at grains refresh frequency
    if not (hubblestack.utils.platform.is_windows()) and __opts__.get(
            "emit_grains_to_syslog", True):
        default_grains_to_emit = [
            "system_uuid",
            "hubble_uuid",
            "session_uuid",
            "machine_id",
            "splunkindex",
            "cloud_details",
            "hubble_version",
            "localhost",
            "fqdn",
        ]
        grains_to_emit = []
        grains_to_emit.extend(
            __opts__.get("emit_grains_to_syslog_list", default_grains_to_emit))
        emit_to_syslog(grains_to_emit)
    return last_grains_refresh
Example #13
def refresh_grains(initial=False):
    """
    Refresh the grains, pillar, utils, modules, and returners
    """
    global __opts__
    global __grains__
    global __utils__
    global __mods__
    global __pillar__
    global __returners__
    global __context__

    persist, old_grains = {}, {}
    if not initial:
        old_grains = copy.deepcopy(__grains__)
        for grain in __opts__.get('grains_persist', []):
            if grain in __grains__:
                persist[grain] = __grains__[grain]
        # Hardcode these core grains as persisting (update rather than
        # replace, so user-configured grains_persist entries are kept)
        persist.update({
            grain: __grains__[grain]
            for grain in ['hubble_version', 'buildinfo'] if grain in __grains__
        })

    if initial:
        __context__ = {}
    if 'grains' in __opts__:
        __opts__.pop('grains')
    if 'pillar' in __opts__:
        __opts__.pop('pillar')
    __grains__ = hubblestack.loader.grains(__opts__)
    __grains__.update(persist)
    __grains__['session_uuid'] = SESSION_UUID

    # This was a weird one. In older versions of hubble the version and
    # buildinfo were not persisted automatically which means that if you
    # installed a new version without restarting hubble, grains refresh could
    # cause that old daemon to report grains as if it were the new version.
    # Now if this hubble_marker_3 grain is present you know you can trust the
    # hubble_version and buildinfo.
    __grains__['hubble_marker_3'] = True

    old_grains.update(__grains__)
    __grains__ = old_grains

    # Check for default gateway and fall back if necessary
    if (__grains__.get('ip_gw', None) is False
            and 'fallback_fileserver_backend' in __opts__):
        log.info(
            'No default gateway detected; using fallback_fileserver_backend.')
        __opts__['fileserver_backend'] = __opts__[
            'fallback_fileserver_backend']

    __opts__['hubble_uuid'] = __grains__.get('hubble_uuid', None)
    __opts__['system_uuid'] = __grains__.get('system_uuid', None)
    __pillar__ = {}
    __opts__['grains'] = __grains__
    __opts__['pillar'] = __pillar__
    __utils__ = hubblestack.loader.utils(__opts__)
    __mods__ = hubblestack.loader.modules(__opts__,
                                          utils=__utils__,
                                          context=__context__)
    __returners__ = hubblestack.loader.returners(__opts__, __mods__)

    # the only things that turn up in here (and that get preserved)
    # are pulsar.queue, pulsar.notifier and cp.fileclient_###########
    # log.debug('keys in __context__: {}'.format(list(__context__)))

    hubblestack.utils.stdrec.__grains__ = __grains__
    hubblestack.utils.stdrec.__opts__ = __opts__

    hubblestack.hec.opt.__grains__ = __grains__
    hubblestack.hec.opt.__mods__ = __mods__
    hubblestack.hec.opt.__opts__ = __opts__

    hubblestack.log.splunk.__grains__ = __grains__
    hubblestack.log.splunk.__mods__ = __mods__
    hubblestack.log.splunk.__opts__ = __opts__

    hubblestack.status.__opts__ = __opts__
    hubblestack.status.__mods__ = __mods__

    hubblestack.utils.signing.__opts__ = __opts__
    hubblestack.utils.signing.__mods__ = __mods__

    hubblestack.module_runner.runner.__mods__ = __mods__
    hubblestack.module_runner.runner.__grains__ = __grains__
    hubblestack.module_runner.runner.__opts__ = __opts__

    hubblestack.module_runner.audit_runner.__mods__ = __mods__
    hubblestack.module_runner.audit_runner.__grains__ = __grains__
    hubblestack.module_runner.audit_runner.__opts__ = __opts__

    hubblestack.module_runner.fdg_runner.__mods__ = __mods__
    hubblestack.module_runner.fdg_runner.__grains__ = __grains__
    hubblestack.module_runner.fdg_runner.__opts__ = __opts__
    hubblestack.module_runner.fdg_runner.__returners__ = __returners__

    hubblestack.utils.signing.__mods__ = __mods__

    HSS.start_sigusr1_signal_handler()
    hubblestack.log.refresh_handler_std_info()
    clear_selective_context()

    if not initial and __mods__['config.get']('splunklogging', False):
        hubblestack.log.emit_to_splunk(__grains__, 'INFO',
                                       'hubblestack.grains_report')
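The grain-persistence dance is easy to miss: selected values are saved before the reload, re-applied on top of the fresh grains, and then the fresh grains are merged over a deep copy of the old ones, so stale keys survive but fresh values win. In plain-dict form (grain names and values are illustrative):

import copy

old = {'hubble_version': '4.0.0', 'custom_grain': 'keep-me'}

# save the grains that must survive the reload
persist = {k: old[k] for k in ['hubble_version', 'buildinfo'] if k in old}

fresh = {'hubble_version': '0.0.0', 'fqdn': 'host1'}  # freshly loaded grains
fresh.update(persist)                                 # persisted values win

merged = copy.deepcopy(old)   # start from the old grains...
merged.update(fresh)          # ...and let the fresh ones overwrite
print(merged)  # -> {'hubble_version': '4.0.0', 'custom_grain': 'keep-me', 'fqdn': 'host1'}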
Example #14
def main():
    '''
    Run the main hubble loop
    '''
    # Initial fileclient setup
    # Clear old locks
    if 'gitfs' in __opts__['fileserver_backend'] or 'git' in __opts__[
            'fileserver_backend']:
        git_objects = [
            salt.utils.gitfs.GitFS(
                __opts__,
                __opts__['gitfs_remotes'],
                per_remote_overrides=salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
                per_remote_only=salt.fileserver.gitfs.PER_REMOTE_ONLY)
        ]
        ret = {}
        for obj in git_objects:
            lock_type = 'update'
            cleared, errors = salt.fileserver.clear_lock(obj.clear_lock,
                                                         'gitfs',
                                                         remote=None,
                                                         lock_type=lock_type)
            if cleared:
                ret.setdefault('cleared', []).extend(cleared)
            if errors:
                ret.setdefault('errors', []).extend(errors)
        if ret:
            log.info('One or more gitfs locks were removed: %s', ret)

    # Setup fileclient
    log.info('Setting up the fileclient/fileserver')

    # Set up fileclient
    retry_count = __opts__.get('fileserver_retry_count_on_startup', None)
    retry_time = __opts__.get('fileserver_retry_rate_on_startup', 30)
    count = 0
    while True:
        try:
            fc = salt.fileclient.get_file_client(__opts__)
            fc.channel.fs.update()
            last_fc_update = time.time()
            break
        except Exception:
            if (retry_count is None
                    or count < retry_count) and not __opts__['function']:
                log.exception(
                    'Exception thrown trying to setup fileclient. '
                    'Trying again in %s seconds.', retry_time)
                count += 1
                time.sleep(retry_time)
                continue
            else:
                log.exception(
                    'Exception thrown trying to setup fileclient. Exiting.')
                sys.exit(1)

    # Check for single function run
    if __opts__['function']:
        run_function()
        sys.exit(0)

    last_grains_refresh = time.time() - __opts__['grains_refresh_frequency']

    log.info('Starting main loop')
    pidfile_count = 0
    # pidfile_refresh in seconds, our scheduler deals in half-seconds
    pidfile_refresh = int(__opts__.get('pidfile_refresh', 60)) * 2
    while True:
        # Check if fileserver needs update
        if time.time() - last_fc_update >= __opts__['fileserver_update_frequency']:
            try:
                fc.channel.fs.update()
                last_fc_update = time.time()
            except Exception:
                retry = __opts__.get('fileserver_retry_rate', 900)
                last_fc_update += retry
                log.exception('Exception thrown trying to update fileclient. '
                              'Trying again in %s seconds.', retry)

        pidfile_count += 1
        if __opts__['daemonize'] and pidfile_count > pidfile_refresh:
            pidfile_count = 0
            create_pidfile()

        if time.time() - last_grains_refresh >= __opts__['grains_refresh_frequency']:
            log.info('Refreshing grains')
            refresh_grains()
            last_grains_refresh = time.time()

            # Emit syslog at grains refresh frequency
            if not (salt.utils.platform.is_windows()) and __opts__.get(
                    'emit_grains_to_syslog', True):
                default_grains_to_emit = [
                    'system_uuid', 'hubble_uuid', 'session_uuid', 'machine_id',
                    'splunkindex', 'cloud_details', 'hubble_version',
                    'localhost', 'fqdn'
                ]
                grains_to_emit = []
                grains_to_emit.extend(
                    __opts__.get('emit_grains_to_syslog_list',
                                 default_grains_to_emit))
                emit_to_syslog(grains_to_emit)

        try:
            log.debug('Executing schedule')
            sf_count = schedule()
            if sf_count > 0:
                log.debug('Executed %d schedule item(s)', sf_count)
                hubblestack.log.workaround_salt_log_handler_queues()
        except Exception as exc:
            log.exception('Error executing schedule: %s', exc)
            if isinstance(exc, KeyboardInterrupt):
                raise exc

        time.sleep(__opts__.get('scheduler_sleep_frequency', 0.5))
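The startup retry loop treats a retry_count of None as "retry forever"; anything else bounds the number of attempts before exiting. The control flow in isolation, with a stubbed setup step (attempt counts and sleep time are hypothetical):

import time

attempts = {'n': 0}

def flaky_setup():
    """Stand-in for the fileclient setup: fails twice, then succeeds."""
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise RuntimeError('not ready')
    return 'client'

retry_count = 5        # None would mean "retry forever"
retry_time = 0.1       # hypothetical; the snippet defaults to 30s
count = 0
while True:
    try:
        client = flaky_setup()
        break
    except Exception:
        if retry_count is None or count < retry_count:
            count += 1
            time.sleep(retry_time)
            continue
        raise SystemExit(1)
print('setup succeeded after', count, 'retries')  # -> 2 retries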
Example #15
def refresh_grains(initial=False):
    """
    Refresh the grains, pillar, utils, modules, and returners
    """
    global __opts__
    global __grains__
    global __utils__
    global __mods__
    global __pillar__
    global __returners__
    global __context__

    # 'POP' is for tracking persistent opts protection
    if os.environ.get("NOISY_POP_DEBUG"):
        log.error("POP refreshing grains (id=%d)", id(__opts__))

    persist, old_grains = {}, {}
    if initial:
        if not os.environ.get("NO_PRESERVE_OPTS"):
            if os.environ.get("NOISY_POP_DEBUG"):
                log.error("POP setting __opts__ to preservable (id=%d)",
                          id(__opts__))
            hubblestack.loader.set_preservable_opts(__opts__)
        elif os.environ.get("NOISY_POP_DEBUG"):
            log.error(
                "POP we are not attempting to protect __opts__ from lazyloader reloads"
            )
    else:
        old_grains = copy.deepcopy(__grains__)
        for grain in __opts__.get("grains_persist", []):
            if grain in __grains__:
                persist[grain] = __grains__[grain]
        # Hardcode these core grains as persisting (update rather than
        # replace, so user-configured grains_persist entries are kept)
        persist.update({
            grain: __grains__[grain]
            for grain in ["hubble_version", "buildinfo"] if grain in __grains__
        })

    if initial:
        __context__ = {}
    if "grains" in __opts__:
        __opts__.pop("grains")
    if "pillar" in __opts__:
        __opts__.pop("pillar")
    __grains__ = hubblestack.loader.grains(__opts__)
    __grains__.update(persist)
    __grains__["session_uuid"] = SESSION_UUID

    # This was a weird one. In older versions of hubble the version and
    # buildinfo were not persisted automatically which means that if you
    # installed a new version without restarting hubble, grains refresh could
    # cause that old daemon to report grains as if it were the new version.
    # Now if this hubble_marker_3 grain is present you know you can trust the
    # hubble_version and buildinfo.
    __grains__["hubble_marker_3"] = True

    old_grains.update(__grains__)
    __grains__ = old_grains

    # Check for default gateway and fall back if necessary
    if (__grains__.get("ip_gw", None) is False
            and "fallback_fileserver_backend" in __opts__):
        log.info(
            "No default gateway detected; using fallback_fileserver_backend.")
        __opts__["fileserver_backend"] = __opts__[
            "fallback_fileserver_backend"]

    __opts__["hubble_uuid"] = __grains__.get("hubble_uuid", None)
    __opts__["system_uuid"] = __grains__.get("system_uuid", None)
    __pillar__ = {}
    __opts__["grains"] = __grains__
    __opts__["pillar"] = __pillar__
    __utils__ = hubblestack.loader.utils(__opts__)
    __mods__ = hubblestack.loader.modules(__opts__,
                                          utils=__utils__,
                                          context=__context__)
    __returners__ = hubblestack.loader.returners(__opts__, __mods__)

    # the only things that turn up in here (and that get preserved)
    # are pulsar.queue, pulsar.notifier and cp.fileclient_###########
    # log.debug('keys in __context__: {}'.format(list(__context__)))

    hubblestack.utils.stdrec.__grains__ = __grains__
    hubblestack.utils.stdrec.__opts__ = __opts__

    hubblestack.hec.opt.__grains__ = __grains__
    hubblestack.hec.opt.__mods__ = __mods__
    hubblestack.hec.opt.__opts__ = __opts__

    hubblestack.log.splunk.__grains__ = __grains__
    hubblestack.log.splunk.__mods__ = __mods__
    hubblestack.log.splunk.__opts__ = __opts__

    hubblestack.status.__opts__ = __opts__
    hubblestack.status.__mods__ = __mods__

    hubblestack.utils.signing.__opts__ = __opts__
    hubblestack.utils.signing.__mods__ = __mods__

    hubblestack.module_runner.runner.__mods__ = __mods__
    hubblestack.module_runner.runner.__grains__ = __grains__
    hubblestack.module_runner.runner.__opts__ = __opts__

    hubblestack.module_runner.audit_runner.__mods__ = __mods__
    hubblestack.module_runner.audit_runner.__grains__ = __grains__
    hubblestack.module_runner.audit_runner.__opts__ = __opts__

    hubblestack.module_runner.fdg_runner.__mods__ = __mods__
    hubblestack.module_runner.fdg_runner.__grains__ = __grains__
    hubblestack.module_runner.fdg_runner.__opts__ = __opts__
    hubblestack.module_runner.fdg_runner.__returners__ = __returners__

    hubblestack.utils.signing.__mods__ = __mods__

    HSS.start_sigusr1_signal_handler()
    hubblestack.log.refresh_handler_std_info()
    clear_selective_context()

    if not initial and __mods__["config.get"]("splunklogging", False):
        hubblestack.log.emit_to_splunk(__grains__, "INFO",
                                       "hubblestack.grains_report")