Code Example #1
File: status.py Project: swipswaps/hubble
    def dumpster_fire(cls, *_a, **_kw):
        """ dump the status.json file to cachedir

            Location and filename can be adjusted with the cachedir and
            hubble:status:dumpster options (see above).
        """
        try:
            if __salt__['config.get']('splunklogging', False):
                # lazy load to avoid circular import
                import hubblestack.log
                hubblestack.log.emit_to_splunk('Signal {0} detected'.format(signal.SIGUSR1),
                                               'INFO',
                                               'hubblestack.signals')
        finally:
            dumpster = get_hubble_status_opt('dumpster') or 'status.json'
            if not dumpster.startswith('/'):
                cachedir = get_hubble_or_salt_opt('cachedir') or '/tmp'
                dumpster = os.path.join(cachedir, dumpster)
            try:
                with open(dumpster, 'w') as status_file:
                    status_file.write(cls.as_json())
                    status_file.write('\n')
                log.info("wrote HubbleStatus to %s", dumpster)
            except Exception:
                log.exception("ignoring exception during dumpster fire")
Code Example #2
File: daemon.py Project: zachsis/hubble
def emit_to_syslog(grains_to_emit):
    '''
    Emit grains and their values to syslog
    '''
    try:
        # Avoid syslog lines longer than 1024 characters
        # Build syslog message
        syslog_list = []
        syslog_list.append('hubble_syslog_message:')
        for grain in grains_to_emit:
            if grain in __grains__:
                if bool(__grains__[grain]) and isinstance(
                        __grains__[grain], dict):
                    for key, value in __grains__[grain].items():
                        syslog_list.append('{0}={1}'.format(key, value))
                else:
                    syslog_list.append('{0}={1}'.format(
                        grain, __grains__[grain]))
        syslog_message = ' '.join(syslog_list)
        log.info('Emitting some grains to syslog')
        syslog.openlog(logoption=syslog.LOG_PID)
        syslog.syslog(syslog_message)
    except Exception as e:
        log.exception(
            'An exception occurred on emitting a message to syslog: {0}'.
            format(e))
Code Example #3
def _setup_cached_uuid():
    """ Get the cached uuid and cached system uui path, read the files
    and remove the cached uuid """

    # Check for a cloned system with existing hubble_uuid
    def _get_uuid_from_system():
        query = '"SELECT uuid AS system_uuid FROM osquery_info;" --header=false --csv'

        # Prefer our /opt/osquery/osqueryi if present
        osqueryipaths = ('/opt/osquery/osqueryi', 'osqueryi',
                         '/usr/bin/osqueryi')
        for path in osqueryipaths:
            if hubblestack.utils.path.which(path):
                live_uuid = hubblestack.modules.cmdmod.run_stdout(
                    '{0} {1}'.format(path, query), output_loglevel='quiet')
                live_uuid = str(live_uuid).upper()
                if len(live_uuid) == 36:
                    return live_uuid
                return None
        # If osquery isn't available, attempt to get uuid from /sys path (linux only)
        try:
            with open('/sys/devices/virtual/dmi/id/product_uuid',
                      'r') as product_uuid_file:
                file_uuid = product_uuid_file.read()
            file_uuid = str(file_uuid).upper()
            if len(file_uuid) == 36:
                return file_uuid
            return None
        except Exception:
            return None

    cached_uuid_path = os.path.join(os.path.dirname(__opts__['configfile']),
                                    'hubble_cached_uuid')
    cached_system_uuid_path = os.path.join(
        os.path.dirname(__opts__['configfile']), 'hubble_cached_system_uuid')
    try:
        if os.path.isfile(cached_uuid_path) and os.path.isfile(
                cached_system_uuid_path):
            with open(cached_uuid_path, 'r') as cached_uuid_file, \
                    open(cached_system_uuid_path, 'r') as cached_system_uuid_file:
                cached_uuid = cached_uuid_file.read()
                cached_system_uuid = cached_system_uuid_file.read()
            if cached_uuid != cached_system_uuid:
                live_uuid = _get_uuid_from_system()
                if live_uuid != cached_system_uuid:
                    log.error(
                        "potentially cloned system detected: System_uuid grain "
                        "previously saved on disk doesn't match live system value.\n"
                        "Resettig cached hubble_uuid value.")
                    os.remove(cached_uuid_path)

    except Exception:
        log.exception(
            "Problem opening cache files while checking for previously cloned system"
        )
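For reference, a sketch of the command that '{0} {1}'.format(path, query) assembles when /opt/osquery/osqueryi is the first path found; the UUID shown is a made-up example:

# The assembled command, run via run_stdout() with output_loglevel='quiet':
cmd = ('/opt/osquery/osqueryi '
       '"SELECT uuid AS system_uuid FROM osquery_info;" --header=false --csv')
# Expected stdout is a bare 36-character UUID, e.g.
#   4C4C4544-0051-3510-8037-B7C04F595331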
Code Example #4
File: daemon.py Project: swipswaps/hubble
def _update_fileserver(file_client):
    """ Update the filserver and the last_fc_update time """
    try:
        file_client.channel.fs.update()
        last_fc_update = time.time()
    except Exception:
        retry = __opts__.get('fileserver_retry_rate', 900)
        # NOTE: last_fc_update is unbound here if update() raised before the
        # assignment above, so compute the retry time from now instead
        last_fc_update = time.time() + retry
        log.exception('Exception thrown trying to update fileclient. '
                      'Trying again in %s seconds.', retry)
    return last_fc_update
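A matching call site appears in the main loop of Code Example #5; the returned timestamp feeds the next interval check.

# Inside the main loop: refresh the fileserver once the configured
# update interval has elapsed.
if time.time() - last_fc_update >= __opts__['fileserver_update_frequency']:
    last_fc_update = _update_fileserver(file_client)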
Code Example #5
def main():
    """
    Run the main hubble loop
    """
    # Initial fileclient setup
    _clear_gitfs_locks()
    # Setup fileclient
    log.info('Setting up the fileclient/fileserver')
    retry_count = __opts__.get('fileserver_retry_count_on_startup', None)
    retry_time = __opts__.get('fileserver_retry_rate_on_startup', 30)
    count = 0
    while True:
        try:
            file_client = hubblestack.fileclient.get_file_client(__opts__)
            file_client.channel.fs.update()
            last_fc_update = time.time()
            break
        except Exception:
            if (retry_count is None
                    or count < retry_count) and not __opts__['function']:
                log.exception(
                    'Exception thrown trying to setup fileclient. '
                    'Trying again in %s seconds.', retry_time)
                count += 1
                time.sleep(retry_time)
                continue
            else:
                log.exception(
                    'Exception thrown trying to setup fileclient. Exiting.')
                sys.exit(1)
    # Check for single function run
    if __opts__['function']:
        run_function()
        sys.exit(0)
    last_grains_refresh = time.time() - __opts__['grains_refresh_frequency']
    log.info('Starting main loop')
    pidfile_count = 0
    # pidfile_refresh in seconds, our scheduler deals in half-seconds
    pidfile_refresh = int(__opts__.get('pidfile_refresh', 60)) * 2
    while True:
        # Check if fileserver needs update
        if time.time() - last_fc_update >= __opts__['fileserver_update_frequency']:
            last_fc_update = _update_fileserver(file_client)
        pidfile_count += 1
        if __opts__['daemonize'] and pidfile_count > pidfile_refresh:
            pidfile_count = 0
            create_pidfile()
        if time.time() - last_grains_refresh >= __opts__['grains_refresh_frequency']:
            last_grains_refresh = _emit_and_refresh_grains()
        try:
            log.debug('Executing schedule')
            sf_count = schedule()
        except Exception as exc:
            log.exception('Error executing schedule: %s', exc)
            if isinstance(exc, KeyboardInterrupt):
                raise exc
        time.sleep(__opts__.get('scheduler_sleep_frequency', 0.5))
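Note the half-second bookkeeping: with the default scheduler_sleep_frequency of 0.5 seconds, each loop iteration lasts roughly half a second, so the default pidfile_refresh of 60 seconds becomes 60 * 2 = 120 iterations between pidfile rewrites.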
Code Example #6
def emit_to_syslog(grains_to_emit):
    """
    Emit grains and their values to syslog
    """
    try:
        # Avoid syslog lines longer than 1024 characters
        # Build syslog message
        syslog_list = ["hubble_syslog_message:"]
        for grain in grains_to_emit:
            if grain in __grains__:
                if bool(__grains__[grain]) and isinstance(
                        __grains__[grain], dict):
                    for key, value in __grains__[grain].items():
                        syslog_list.append("{0}={1}".format(key, value))
                else:
                    syslog_list.append("{0}={1}".format(
                        grain, __grains__[grain]))
        syslog_message = " ".join(syslog_list)
        log.info("Emitting some grains to syslog")
        syslog.openlog(logoption=syslog.LOG_PID)
        syslog.syslog(syslog_message)
    except Exception as exc:
        log.exception(
            "An exception occurred on emitting a message to syslog: %s", exc)
Code Example #7
File: daemon.py Project: zachsis/hubble
def main():
    '''
    Run the main hubble loop
    '''
    # Initial fileclient setup
    # Clear old locks
    if ('gitfs' in __opts__['fileserver_backend']
            or 'git' in __opts__['fileserver_backend']):
        git_objects = [
            salt.utils.gitfs.GitFS(
                __opts__,
                __opts__['gitfs_remotes'],
                per_remote_overrides=salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
                per_remote_only=salt.fileserver.gitfs.PER_REMOTE_ONLY)
        ]
        ret = {}
        for obj in git_objects:
            lock_type = 'update'
            cleared, errors = salt.fileserver.clear_lock(obj.clear_lock,
                                                         'gitfs',
                                                         remote=None,
                                                         lock_type=lock_type)
            if cleared:
                ret.setdefault('cleared', []).extend(cleared)
            if errors:
                ret.setdefault('errors', []).extend(errors)
        if ret:
            log.info('One or more gitfs locks were removed: {0}'.format(ret))

    # Setup fileclient
    log.info('Setting up the fileclient/fileserver')

    retry_count = __opts__.get('fileserver_retry_count_on_startup', None)
    retry_time = __opts__.get('fileserver_retry_rate_on_startup', 30)
    count = 0
    while True:
        try:
            fc = salt.fileclient.get_file_client(__opts__)
            fc.channel.fs.update()
            last_fc_update = time.time()
            break
        except Exception as exc:
            if (retry_count is None
                    or count < retry_count) and not __opts__['function']:
                log.exception(
                    'Exception thrown trying to setup fileclient. '
                    'Trying again in {0} seconds.'.format(retry_time))
                count += 1
                time.sleep(retry_time)
                continue
            else:
                log.exception(
                    'Exception thrown trying to setup fileclient. Exiting.')
                sys.exit(1)

    # Check for single function run
    if __opts__['function']:
        run_function()
        sys.exit(0)

    last_grains_refresh = time.time() - __opts__['grains_refresh_frequency']

    log.info('Starting main loop')
    pidfile_count = 0
    # pidfile_refresh in seconds, our scheduler deals in half-seconds
    pidfile_refresh = int(__opts__.get('pidfile_refresh', 60)) * 2
    while True:
        # Check if fileserver needs update
        if time.time() - last_fc_update >= __opts__['fileserver_update_frequency']:
            try:
                fc.channel.fs.update()
                last_fc_update = time.time()
            except Exception as exc:
                retry = __opts__.get('fileserver_retry_rate', 900)
                last_fc_update += retry
                log.exception('Exception thrown trying to update fileclient. '
                              'Trying again in {0} seconds.'.format(retry))

        pidfile_count += 1
        if __opts__['daemonize'] and pidfile_count > pidfile_refresh:
            pidfile_count = 0
            create_pidfile()

        if time.time() - last_grains_refresh >= __opts__['grains_refresh_frequency']:
            log.info('Refreshing grains')
            refresh_grains()
            last_grains_refresh = time.time()

            # Emit syslog at grains refresh frequency
            if not (salt.utils.platform.is_windows()) and __opts__.get(
                    'emit_grains_to_syslog', True):
                default_grains_to_emit = [
                    'system_uuid', 'hubble_uuid', 'session_uuid', 'machine_id',
                    'splunkindex', 'cloud_details', 'hubble_version',
                    'localhost', 'fqdn'
                ]
                grains_to_emit = []
                grains_to_emit.extend(
                    __opts__.get('emit_grains_to_syslog_list',
                                 default_grains_to_emit))
                emit_to_syslog(grains_to_emit)

        try:
            log.debug('Executing schedule')
            sf_count = schedule()
            if sf_count > 0:
                log.debug('Executed %d schedule item(s)', sf_count)
                hubblestack.log.workaround_salt_log_handler_queues()
        except Exception as e:
            log.exception('Error executing schedule: {0}'.format(e))
            if isinstance(e, KeyboardInterrupt):
                raise e

        time.sleep(__opts__.get('scheduler_sleep_frequency', 0.5))
Code Example #8
File: daemon.py Project: zachsis/hubble
def load_config():
    '''
    Load the config from the configfile and apply it to the imported salt modules
    '''
    # Parse arguments
    parsed_args = parse_args()

    # Let's find out the path of this module
    if 'SETUP_DIRNAME' in globals():
        # This is from the exec() call in Salt's setup.py
        this_file = os.path.join(SETUP_DIRNAME, 'salt', 'syspaths.py')  # pylint: disable=E0602
    else:
        this_file = __file__
    install_dir = os.path.dirname(os.path.realpath(this_file))

    # Load unique data for Windows or Linux
    if salt.utils.platform.is_windows():
        if parsed_args.get('configfile') is None:
            parsed_args['configfile'] = \
                'C:\\Program Files (x86)\\Hubble\\etc\\hubble\\hubble.conf'
        salt.config.DEFAULT_MINION_OPTS['cachedir'] = \
            'C:\\Program Files (x86)\\hubble\\var\\cache'
        salt.config.DEFAULT_MINION_OPTS['pidfile'] = \
            'C:\\Program Files (x86)\\hubble\\var\\run\\hubble.pid'
        salt.config.DEFAULT_MINION_OPTS['log_file'] = \
            'C:\\Program Files (x86)\\hubble\\var\\log\\hubble.log'
        salt.config.DEFAULT_MINION_OPTS['osquery_dbpath'] = \
            'C:\\Program Files (x86)\\hubble\\var\\hubble_osquery_db'
        salt.config.DEFAULT_MINION_OPTS['osquerylogpath'] = \
            'C:\\Program Files (x86)\\hubble\\var\\log\\hubble_osquery'
        salt.config.DEFAULT_MINION_OPTS['osquerylog_backupdir'] = \
            'C:\\Program Files (x86)\\hubble\\var\\log\\hubble_osquery\\backuplogs'

    else:
        if parsed_args.get('configfile') is None:
            parsed_args['configfile'] = '/etc/hubble/hubble'
        salt.config.DEFAULT_MINION_OPTS['cachedir'] = '/var/cache/hubble'
        salt.config.DEFAULT_MINION_OPTS['pidfile'] = '/var/run/hubble.pid'
        salt.config.DEFAULT_MINION_OPTS['log_file'] = '/var/log/hubble'
        salt.config.DEFAULT_MINION_OPTS['osquery_dbpath'] = \
            '/var/cache/hubble/osquery'
        salt.config.DEFAULT_MINION_OPTS['osquerylogpath'] = \
            '/var/log/hubble_osquery'
        salt.config.DEFAULT_MINION_OPTS['osquerylog_backupdir'] = \
            '/var/log/hubble_osquery/backuplogs'

    salt.config.DEFAULT_MINION_OPTS['file_roots'] = {'base': []}
    salt.config.DEFAULT_MINION_OPTS['log_level'] = 'error'
    salt.config.DEFAULT_MINION_OPTS['file_client'] = 'local'
    salt.config.DEFAULT_MINION_OPTS['fileserver_update_frequency'] = 43200  # 12 hours
    salt.config.DEFAULT_MINION_OPTS['grains_refresh_frequency'] = 3600  # 1 hour
    salt.config.DEFAULT_MINION_OPTS['scheduler_sleep_frequency'] = 0.5
    salt.config.DEFAULT_MINION_OPTS['default_include'] = 'hubble.d/*.conf'
    salt.config.DEFAULT_MINION_OPTS['logfile_maxbytes'] = 100000000  # 100MB
    salt.config.DEFAULT_MINION_OPTS['logfile_backups'] = 1  # maximum rotated logs
    salt.config.DEFAULT_MINION_OPTS['delete_inaccessible_azure_containers'] = False
    # Globbing will not be supported in nebula masking
    salt.config.DEFAULT_MINION_OPTS['enable_globbing_in_nebula_masking'] = False
    salt.config.DEFAULT_MINION_OPTS['osquery_logfile_maxbytes'] = 50000000  # 50MB
    salt.config.DEFAULT_MINION_OPTS['osquery_logfile_maxbytes_toparse'] = 100000000  # 100MB
    salt.config.DEFAULT_MINION_OPTS['osquery_backuplogs_count'] = 2

    global __opts__

    __opts__ = salt.config.minion_config(parsed_args.get('configfile'))
    __opts__.update(parsed_args)
    __opts__['conf_file'] = parsed_args.get('configfile')
    __opts__['install_dir'] = install_dir

    if __opts__['version']:
        print(__version__)
        clean_up_process(None, None)
        sys.exit(0)

    if __opts__['buildinfo']:
        try:
            from hubblestack import __buildinfo__
        except ImportError:
            __buildinfo__ = 'NOT SET'
        print(__buildinfo__)
        clean_up_process(None, None)
        sys.exit(0)

    scan_proc = __opts__.get('scan_proc', False)

    if __opts__['daemonize']:
        # before becoming a daemon, check for other procs and possibly send
        # them a signal 15 (otherwise refuse to run)
        if not __opts__.get('ignore_running', False):
            check_pidfile(kill_other=True, scan_proc=scan_proc)
        salt.utils.daemonize()
        create_pidfile()
    elif (not __opts__['function'] and not __opts__['version']
          and not __opts__['buildinfo']):
        # check the pidfile and possibly refuse to run
        # (assuming this isn't a single function call)
        if not __opts__.get('ignore_running', False):
            check_pidfile(kill_other=False, scan_proc=scan_proc)

    signal.signal(signal.SIGTERM, clean_up_process)
    signal.signal(signal.SIGINT, clean_up_process)
    signal.signal(signal.SIGABRT, clean_up_process)
    signal.signal(signal.SIGFPE, clean_up_process)
    signal.signal(signal.SIGILL, clean_up_process)
    signal.signal(signal.SIGSEGV, clean_up_process)
    if not salt.utils.platform.is_windows():
        signal.signal(signal.SIGHUP, clean_up_process)
        signal.signal(signal.SIGQUIT, clean_up_process)

    # Optional sleep to wait for network
    time.sleep(int(__opts__.get('startup_sleep', 0)))

    # Convert -vvv to log level
    if __opts__['log_level'] is None:
        # Default to 'error'
        __opts__['log_level'] = 'error'
        # Default to more verbose if we're daemonizing
        if __opts__['daemonize']:
            __opts__['log_level'] = 'info'
    # Handle the explicit -vvv settings
    if __opts__['verbose'] == 1:
        __opts__['log_level'] = 'warning'
    elif __opts__['verbose'] == 2:
        __opts__['log_level'] = 'info'
    elif __opts__['verbose'] >= 3:
        __opts__['log_level'] = 'debug'

    # Setup module/grain/returner dirs
    module_dirs = __opts__.get('module_dirs', [])
    module_dirs.append(
        os.path.join(os.path.dirname(__file__), 'extmods', 'modules'))
    __opts__['module_dirs'] = module_dirs
    grains_dirs = __opts__.get('grains_dirs', [])
    grains_dirs.append(
        os.path.join(os.path.dirname(__file__), 'extmods', 'grains'))
    __opts__['grains_dirs'] = grains_dirs
    returner_dirs = __opts__.get('returner_dirs', [])
    returner_dirs.append(
        os.path.join(os.path.dirname(__file__), 'extmods', 'returners'))
    __opts__['returner_dirs'] = returner_dirs
    fileserver_dirs = __opts__.get('fileserver_dirs', [])
    fileserver_dirs.append(
        os.path.join(os.path.dirname(__file__), 'extmods', 'fileserver'))
    __opts__['fileserver_dirs'] = fileserver_dirs
    utils_dirs = __opts__.get('utils_dirs', [])
    utils_dirs.append(
        os.path.join(os.path.dirname(__file__), 'extmods', 'utils'))
    __opts__['utils_dirs'] = utils_dirs
    fdg_dirs = __opts__.get('fdg_dirs', [])
    fdg_dirs.append(os.path.join(os.path.dirname(__file__), 'extmods', 'fdg'))
    __opts__['fdg_dirs'] = fdg_dirs
    __opts__['file_roots']['base'].insert(
        0, os.path.join(os.path.dirname(__file__), 'files'))
    if 'roots' not in __opts__['fileserver_backend']:
        __opts__['fileserver_backend'].append('roots')

    # Disable all of salt's boto modules, they give nothing but trouble to the loader
    disable_modules = __opts__.get('disable_modules', [])
    disable_modules.extend([
        'boto3_elasticache',
        'boto3_route53',
        'boto3_sns',
        'boto_apigateway',
        'boto_asg',
        'boto_cfn',
        'boto_cloudfront',
        'boto_cloudtrail',
        'boto_cloudwatch_event',
        'boto_cloudwatch',
        'boto_cognitoidentity',
        'boto_datapipeline',
        'boto_dynamodb',
        'boto_ec2',
        'boto_efs',
        'boto_elasticache',
        'boto_elasticsearch_domain',
        'boto_elb',
        'boto_elbv2',
        'boto_iam',
        'boto_iot',
        'boto_kinesis',
        'boto_kms',
        'boto_lambda',
        'boto_rds',
        'boto_route53',
        'boto_s3_bucket',
        'boto_s3',
        'boto_secgroup',
        'boto_sns',
        'boto_sqs',
        'boto_ssm',
        'boto_vpc',
    ])
    __opts__['disable_modules'] = disable_modules

    # Console logging is probably the same, but can be different
    console_logging_opts = {
        'log_level': __opts__.get('console_log_level', __opts__['log_level']),
        'log_format': __opts__.get('console_log_format',
                                   '%(asctime)s [%(levelname)-5s] %(message)s'),
        'date_format': __opts__.get('console_log_date_format', '%H:%M:%S'),
    }
    file_logging_opts = {
        'log_file': __opts__.get('log_file', '/var/log/hubble'),
        'log_level': __opts__['log_level'],
        'log_format': __opts__.get(
            'log_format',
            '%(asctime)s,%(msecs)03d [%(levelname)-5s] [%(name)s:%(lineno)d]  %(message)s'),
        'date_format': __opts__.get('log_date_format', '%Y-%m-%d %H:%M:%S'),
        'max_bytes': __opts__.get('logfile_maxbytes', 100000000),
        'backup_count': __opts__.get('logfile_backups', 1),
    }

    # Setup logging
    hubblestack.log.setup_console_logger(**console_logging_opts)
    hubblestack.log.setup_file_logger(**file_logging_opts)

    # 384 is 0o600 permissions, written without octal for python 2/3 compat
    os.chmod(__opts__['log_file'], 384)
    os.chmod(parsed_args.get('configfile'), 384)

    # Check for a cloned system with existing hubble_uuid
    def _get_uuid_from_system():
        query = '"SELECT uuid AS system_uuid FROM osquery_info;" --header=false --csv'

        # Prefer our /opt/osquery/osqueryi if present
        osqueryipaths = ('/opt/osquery/osqueryi', 'osqueryi',
                         '/usr/bin/osqueryi')
        for path in osqueryipaths:
            if salt.utils.path.which(path):
                live_uuid = salt.modules.cmdmod.run_stdout(
                    '{0} {1}'.format(path, query), output_loglevel='quiet')
                live_uuid = str(live_uuid).upper()
                if len(live_uuid) == 36:
                    return live_uuid
                else:
                    return None
        # If osquery isn't available, attempt to get uuid from /sys path (linux only)
        try:
            with open('/sys/devices/virtual/dmi/id/product_uuid', 'r') as f:
                file_uuid = f.read()
            file_uuid = str(file_uuid).upper()
            if len(file_uuid) == 36:
                return file_uuid
            else:
                return None
        except Exception:
            return None

    cached_uuid_path = os.path.join(os.path.dirname(__opts__['configfile']),
                                    'hubble_cached_uuid')
    cached_system_uuid_path = os.path.join(
        os.path.dirname(__opts__['configfile']), 'hubble_cached_system_uuid')
    try:
        if os.path.isfile(cached_uuid_path) and os.path.isfile(
                cached_system_uuid_path):
            with open(cached_uuid_path, 'r') as f, \
                    open(cached_system_uuid_path, 'r') as g:
                cached_uuid = f.read()
                cached_system_uuid = g.read()
            if cached_uuid != cached_system_uuid:
                live_uuid = _get_uuid_from_system()
                if live_uuid != cached_system_uuid:
                    log.error(
                        "potentially cloned system detected: System_uuid grain "
                        "previously saved on disk doesn't match live system value.\n"
                        "Resettig cached hubble_uuid value.")
                    os.remove(cached_uuid_path)

    except Exception:
        log.exception(
            "Problem opening cache files while checking for previously cloned system"
        )

    refresh_grains(initial=True)

    if __salt__['config.get']('splunklogging', False):
        hubblestack.log.setup_splunk_logger()
        hubblestack.log.emit_to_splunk(__grains__,
                                       'INFO',
                                       'hubblestack.grains_report',
                                       remove_sensitive_logs=True)
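A hypothetical entry point tying the snippets together; the real console-script wrapper is not shown in these examples:

if __name__ == '__main__':
    load_config()  # parse args, load config, set up logging and signal handlers
    main()         # run the scheduler loop from Code Example #5 / #7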