def returner(ret): """ Get fdg data and post it to Splunk """ data = ret['return'] if not data: return host_args = _build_args(ret) if host_args['fun'] != 'fdg.top' and host_args['fun'] != 'fdg.run': if len(data) < 2: log.error('Non-fdg data found in splunk_fdg_return: %s', data) return data = {data[0]: data[1]} # Get cloud details cloud_details = __grains__.get('cloud_details', {}) try: opts_list = get_splunk_options(sourcetype='hubble_fdg', add_query_to_sourcetype=True, _nick={'sourcetype_fdg': 'sourcetype'}) for opts in opts_list: logging.debug('Options: %s', json.dumps(opts)) # Set up the fields to be extracted at index time. The field values must be strings. # Note that these fields will also still be available in the event data index_extracted_fields = [] try: index_extracted_fields.extend( __opts__.get('splunk_index_extracted_fields', [])) except TypeError: pass args, kwargs = make_hec_args(opts) hec = http_event_collector(*args, **kwargs) for fdg_info, fdg_results in data.items(): if not isinstance(fdg_results, list): fdg_results = [fdg_results] for fdg_result in fdg_results: payload = _generate_payload( args=host_args, opts=opts, index_extracted_fields=index_extracted_fields, fdg_args={ 'fdg_info': fdg_info, 'fdg_result': fdg_result }, cloud_details=cloud_details) hec.batchEvent(payload) hec.flushBatch() except Exception: log.exception('Error ocurred in splunk_fdg_return') return
def returner(ret): """ Get audit data and post it to Splunk """ data = ret['return'] if not isinstance(data, dict): log.error( 'Data sent to splunk_audit_return was not formed as a dict:\n%s', data) return host_args = _build_args(ret) # Get cloud details cloud_details = __grains__.get('cloud_details', {}) try: opts_list = get_splunk_options( sourcetype='hubble_audit_v2', _nick={'sourcetype_audit': 'sourcetype'}) for opts in opts_list: log.debug('Options: %s', json.dumps(opts)) custom_fields = opts['custom_fields'] # Set up the collector args, kwargs = make_hec_args(opts) hec = http_event_collector(*args, **kwargs) host_args['hec'] = hec # Failure checks _publish_data(args=host_args, checks=data.get('Failure', []), check_result='Failure', cloud_details=cloud_details, opts=opts) # Success checks _publish_data(args=host_args, checks=data.get('Success', []), check_result='Success', cloud_details=cloud_details, opts=opts) # Compliance checks if data.get('Compliance', None): host_args['Compliance'] = data['Compliance'] event = _generate_event(args=host_args, cloud_details=cloud_details, custom_fields=custom_fields, check_type='compliance') _publish_event(fqdn=host_args['fqdn'], event=event, opts=opts, hec=hec) hec.flushBatch() except Exception: log.exception('Error occurred in splunk_audit_return') return
def _build_hec(opts):
    """
    Extract the appropriate parameters from opts, create and return the
    http_event_collector

    opts
        dict containing Splunk options to be passed to the `http_event_collector`
    """
    args, kwargs = make_hec_args(opts)
    hec = http_event_collector(*args, **kwargs)
    return hec
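# Illustrative usage of _build_hec (not part of the module): each opts dict
# from get_splunk_options() yields one collector; events are batched locally
# and then flushed to the HEC endpoint in one go. The event payload shown is
# a made-up example, not a real Hubble event.
#
#     for opts in get_splunk_options():
#         hec = _build_hec(opts)
#         hec.batchEvent({'host': 'example-host', 'event': {'msg': 'hello'}})
#         hec.flushBatch()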
def returner(ret): """ Get pulsar data and post it to Splunk """ if isinstance(ret, dict) and not ret.get('return'): # Empty single return, let's not do any setup or anything return # Check whether or not data is batched: if isinstance(ret, dict): # Batching is disabled data = [ret] else: data = ret # Sometimes there are duplicate events in the list. Dedup them: data = _dedup_list(data) host_args = _build_args(ret) alerts = _build_alerts(data) # Get cloud details cloud_details = __grains__.get('cloud_details', {}) try: opts_list = get_splunk_options(sourcetype='hubble_fim', _nick={'sourcetype_pulsar': 'sourcetype'}) for opts in opts_list: logging.debug('Options: %s', json.dumps(opts)) # Set up the fields to be extracted at index time. The field values must be strings. # Note that these fields will also still be available in the event data index_extracted_fields = [] try: index_extracted_fields.extend(__opts__.get('splunk_index_extracted_fields', [])) except TypeError: pass # Set up the collector args, kwargs = make_hec_args(opts) hec = http_event_collector(*args, **kwargs) for alert in alerts: if 'change' in alert: # Linux, normal pulsar # The second half of the change will be '|IN_ISDIR' for directories change = alert['change'].split('|')[0] # Skip the IN_IGNORED events if change == 'IN_IGNORED': continue event = _build_linux_event(alert, change) else: # Windows, win_pulsar event = _build_windows_event(alert) event = _update_event(opts['custom_fields'], host_args, cloud_details, event) payload = _build_payload(host_args, event, opts, index_extracted_fields) hec.batchEvent(payload) hec.flushBatch() except Exception: log.exception('Error ocurred in splunk_pulsar_return') return
def returner(ret): """ Get nebula data and post it to Splunk """ # st = 'salt:hubble:nova' if not ret['return']: return host_args = _build_args(ret) # Get cloud details cloud_details = __grains__.get('cloud_details', {}) try: opts_list = get_splunk_options( sourcetype='hubble_osquery', add_query_to_sourcetype=True, _nick={'sourcetype_nebula': 'sourcetype'}) for opts in opts_list: logging.debug('Options: %s', json.dumps(opts)) # Set up the fields to be extracted at index time. The field values must be strings. # Note that these fields will also still be available in the event data index_extracted_fields = [] try: index_extracted_fields.extend( __opts__.get('splunk_index_extracted_fields', [])) except TypeError: pass # Set up the collector args, kwargs = make_hec_args(opts) hec = http_event_collector(*args, **kwargs) for query in ret['return']: for query_name, query_results in query.items(): if 'data' not in query_results: query_results['data'] = [{'error': 'result missing'}] for query_result in query_results['data']: payload = _generate_payload( host_args=host_args, opts=opts, query_data={ 'query_name': query_name, 'query_result': query_result }, index_extracted_fields=index_extracted_fields, cloud_details=cloud_details) event_time = _check_time(query_result) hec.batchEvent(payload, eventtime=event_time) hec.flushBatch() except Exception: log.exception('Error ocurred in splunk_nebula_return') return
def __init__(self):
    super(SplunkHandler, self).__init__()

    self.opts_list = get_splunk_options()
    self.endpoint_list = []

    for opts in self.opts_list:
        custom_fields = opts['custom_fields']
        # Set up the fields to be extracted at index time. The field values must be strings.
        # Note that these fields will also still be available in the event data
        index_extracted_fields = []
        try:
            index_extracted_fields.extend(
                __opts__.get('splunk_index_extracted_fields', []))
        except TypeError:
            pass

        # Set up the collector
        args, kwargs = make_hec_args(opts)
        hec = http_event_collector(*args, **kwargs)

        fqdn = hubblestack.utils.stdrec.get_fqdn()

        event = {}
        event.update(hubblestack.utils.stdrec.std_info())

        for custom_field in custom_fields:
            custom_field_name = 'custom_' + custom_field
            custom_field_value = __salt__['config.get'](custom_field, '')
            if isinstance(custom_field_value, str):
                event.update({custom_field_name: custom_field_value})
            elif isinstance(custom_field_value, list):
                custom_field_value = ','.join(custom_field_value)
                event.update({custom_field_name: custom_field_value})

        payload = {}
        payload.update({'host': fqdn})
        payload.update({'index': opts['index']})
        payload.update({'sourcetype': opts['sourcetype']})

        # Potentially add metadata fields:
        fields = {}
        for item in index_extracted_fields:
            if item in event and not isinstance(event[item], (list, dict, tuple)):
                fields["meta_%s" % item] = str(event[item])
        if fields:
            payload.update({'fields': fields})

        self.endpoint_list.append([hec, event, payload])
def returner(ret): """ Get osqueryd data and post it to Splunk """ data = ret['return'] if not data: return host_args = _build_args(ret) # Get cloud details cloud_details = __grains__.get('cloud_details', {}) try: opts_list = get_splunk_options( sourcetype='hubble_osqueryd', add_query_to_sourcetype=True, _nick={'sourcetype_osqueryd': 'sourcetype'}) for opts in opts_list: logging.debug('Options: %s', json.dumps(opts)) # Set up the collector args, kwargs = make_hec_args(opts) hec = http_event_collector(*args, **kwargs) for query_results in data: event = _generate_event(host_args=host_args, query_name=query_results['name'], query_results=query_results, cloud_details=cloud_details) if 'columns' in query_results: # This means we have result log event event.update(query_results['columns']) _generate_and_send_payload(hec=hec, host_args=host_args, opts=opts, event=event, query_results=query_results) elif 'snapshot' in query_results: # This means we have snapshot log event for q_result in query_results['snapshot']: n_event = copy.deepcopy(event) n_event.update(q_result) _generate_and_send_payload(hec=hec, host_args=host_args, opts=opts, event=n_event, query_results=query_results) else: log.error("Incompatible event data captured") hec.flushBatch() except Exception: log.exception('Error ocurred in splunk_osqueryd_return') return
def returner(ret):
    try:
        opts_list = get_splunk_options(sourcetype_nova='hubble_audit',
                                       _nick={'sourcetype_nova': 'sourcetype'})
        for opts in opts_list:
            log.debug('Options: %s', json.dumps(opts))
            custom_fields = opts['custom_fields']
            # Set up the fields to be extracted at index time. The field values must be strings.
            # Note that these fields will also still be available in the event data
            index_extracted_fields = []
            try:
                index_extracted_fields.extend(__opts__.get('splunk_index_extracted_fields', []))
            except TypeError:
                pass
            # Set up the collector
            args, kwargs = make_hec_args(opts)
            hec = http_event_collector(*args, **kwargs)
            data = ret['return']
            minion_id = ret['id']
            jid = ret['jid']
            global RETRY
            RETRY = ret['retry']
            fqdn = __grains__['fqdn']
            # Sometimes fqdn is blank. If it is, replace it with minion_id
            fqdn = fqdn if fqdn else minion_id
            master = __grains__['master']
            try:
                fqdn_ip4 = __grains__.get('local_ip4')
                if not fqdn_ip4:
                    fqdn_ip4 = __grains__['fqdn_ip4'][0]
            except IndexError:
                try:
                    fqdn_ip4 = __grains__['ipv4'][0]
                except IndexError:
                    raise Exception('No ipv4 grains found. Is net-tools installed?')
            if fqdn_ip4.startswith('127.'):
                for ip4_addr in __grains__['ipv4']:
                    if ip4_addr and not ip4_addr.startswith('127.'):
                        fqdn_ip4 = ip4_addr
                        break
            local_fqdn = __grains__.get('local_fqdn', __grains__['fqdn'])
            # Sometimes fqdn reports a value of localhost. If that happens, try another method.
            bad_fqdns = ['localhost', 'localhost.localdomain', 'localhost6.localdomain6']
            if fqdn in bad_fqdns:
                new_fqdn = socket.gethostname()
                if '.' not in new_fqdn or new_fqdn in bad_fqdns:
                    new_fqdn = fqdn_ip4
                fqdn = new_fqdn
            if __grains__['master']:
                master = __grains__['master']
            else:
                master = socket.gethostname()  # We *are* the master, so use our hostname
            if not isinstance(data, dict):
                log.error('Data sent to splunk_nova_return was not formed as a dict:\n%s', data)
                return
            # Get cloud details
            cloud_details = __grains__.get('cloud_details', {})
            for fai in data.get('Failure', []):
                check_id = next(iter(fai))
                payload = {}
                event = {}
                event.update({'check_result': 'Failure'})
                event.update({'check_id': check_id})
                event.update({'job_id': jid})
                if not isinstance(fai[check_id], dict):
                    event.update({'description': fai[check_id]})
                elif 'description' in fai[check_id]:
                    for key, value in fai[check_id].items():
                        if key not in ['tag']:
                            event[key] = value
                event.update({'master': master})
                event.update({'minion_id': minion_id})
                event.update({'dest_host': fqdn})
                event.update({'dest_ip': fqdn_ip4})
                event.update({'dest_fqdn': local_fqdn})
                event.update({'system_uuid': __grains__.get('system_uuid')})
                event.update(cloud_details)
                for custom_field in custom_fields:
                    custom_field_name = 'custom_' + custom_field
                    custom_field_value = __salt__['config.get'](custom_field, '')
                    if isinstance(custom_field_value, str):
                        event.update({custom_field_name: custom_field_value})
                    elif isinstance(custom_field_value, list):
                        custom_field_value = ','.join(custom_field_value)
                        event.update({custom_field_name: custom_field_value})
                payload.update({'host': fqdn})
                payload.update({'index': opts['index']})
                payload.update({'sourcetype': opts['sourcetype']})
                payload.update({'event': event})
                # Potentially add metadata fields:
                fields = {}
                for item in index_extracted_fields:
                    if item in payload['event'] and not isinstance(
                            payload['event'][item], (list, dict, tuple)):
                        fields["meta_%s" % item] = str(payload['event'][item])
                if fields:
                    payload.update({'fields': fields})
                hec.batchEvent(payload)
            for suc in data.get('Success', []):
                check_id = next(iter(suc))
                payload = {}
                event = {}
                event.update({'check_result': 'Success'})
                event.update({'check_id': check_id})
                event.update({'job_id': jid})
                if not isinstance(suc[check_id], dict):
                    event.update({'description': suc[check_id]})
                elif 'description' in suc[check_id]:
                    for key, value in suc[check_id].items():
                        if key not in ['tag']:
                            event[key] = value
                event.update({'master': master})
                event.update({'minion_id': minion_id})
                event.update({'dest_host': fqdn})
                event.update({'dest_ip': fqdn_ip4})
                event.update({'dest_fqdn': local_fqdn})
                event.update({'system_uuid': __grains__.get('system_uuid')})
                event.update(cloud_details)
                for custom_field in custom_fields:
                    custom_field_name = 'custom_' + custom_field
                    custom_field_value = __salt__['config.get'](custom_field, '')
                    if isinstance(custom_field_value, str):
                        event.update({custom_field_name: custom_field_value})
                    elif isinstance(custom_field_value, list):
                        custom_field_value = ','.join(custom_field_value)
                        event.update({custom_field_name: custom_field_value})
                payload.update({'host': fqdn})
                payload.update({'sourcetype': opts['sourcetype']})
                payload.update({'index': opts['index']})
                # Remove any empty fields from the event payload
                remove_keys = [k for k in event if event[k] == ""]
                for k in remove_keys:
                    del event[k]
                payload.update({'event': event})
                # Potentially add metadata fields:
                fields = {}
                for item in index_extracted_fields:
                    if item in payload['event'] and not isinstance(
                            payload['event'][item], (list, dict, tuple)):
                        fields["meta_%s" % item] = str(payload['event'][item])
                if fields:
                    payload.update({'fields': fields})
                hec.batchEvent(payload)
            if data.get('Compliance', None):
                payload = {}
                event = {}
                event.update({'job_id': jid})
                event.update({'compliance_percentage': data['Compliance']})
                event.update({'master': master})
                event.update({'minion_id': minion_id})
                event.update({'dest_host': fqdn})
                event.update({'dest_ip': fqdn_ip4})
                event.update({'dest_fqdn': local_fqdn})
                event.update({'system_uuid': __grains__.get('system_uuid')})
                event.update(cloud_details)
                for custom_field in custom_fields:
                    custom_field_name = 'custom_' + custom_field
                    custom_field_value = __salt__['config.get'](custom_field, '')
                    if isinstance(custom_field_value, str):
                        event.update({custom_field_name: custom_field_value})
                    elif isinstance(custom_field_value, list):
                        custom_field_value = ','.join(custom_field_value)
                        event.update({custom_field_name: custom_field_value})
                payload.update({'host': fqdn})
                payload.update({'sourcetype': opts['sourcetype']})
                payload.update({'index': opts['index']})
                payload.update({'event': event})
                # Potentially add metadata fields:
                fields = {}
                for item in index_extracted_fields:
                    if item in payload['event'] and not isinstance(
                            payload['event'][item], (list, dict, tuple)):
                        fields["meta_%s" % item] = str(payload['event'][item])
                if fields:
                    payload.update({'fields': fields})
                hec.batchEvent(payload)
            hec.flushBatch()
    except Exception:
        log.exception('Error occurred in splunk_nova_return')
    return
def __init__(self):
    super(SplunkHandler, self).__init__()

    self.opts_list = get_splunk_options()
    self.endpoint_list = []

    for opts in self.opts_list:
        custom_fields = opts['custom_fields']
        # Set up the fields to be extracted at index time. The field values must be strings.
        # Note that these fields will also still be available in the event data
        index_extracted_fields = []
        try:
            index_extracted_fields.extend(
                __opts__.get('splunk_index_extracted_fields', []))
        except TypeError:
            pass

        # Set up the collector
        args, kwargs = make_hec_args(opts)
        hec = http_event_collector(*args, **kwargs)

        minion_id = __grains__['id']
        master = __grains__['master']
        fqdn = __grains__['fqdn']
        # Sometimes fqdn is blank. If it is, replace it with minion_id
        fqdn = fqdn if fqdn else minion_id
        try:
            fqdn_ip4 = __grains__.get('local_ip4')
            if not fqdn_ip4:
                fqdn_ip4 = __grains__['fqdn_ip4'][0]
        except IndexError:
            try:
                fqdn_ip4 = __grains__['ipv4'][0]
            except IndexError:
                raise Exception('No ipv4 grains found. Is net-tools installed?')
        if fqdn_ip4.startswith('127.'):
            for ip4_addr in __grains__['ipv4']:
                if ip4_addr and not ip4_addr.startswith('127.'):
                    fqdn_ip4 = ip4_addr
                    break

        # Sometimes fqdn reports a value of localhost. If that happens, try another method.
        bad_fqdns = ['localhost', 'localhost.localdomain', 'localhost6.localdomain6']
        if fqdn in bad_fqdns:
            new_fqdn = socket.gethostname()
            if '.' not in new_fqdn or new_fqdn in bad_fqdns:
                new_fqdn = fqdn_ip4
            fqdn = new_fqdn

        event = {}
        event.update(hubblestack.utils.stdrec.std_info())

        for custom_field in custom_fields:
            custom_field_name = 'custom_' + custom_field
            custom_field_value = __salt__['config.get'](custom_field, '')
            if isinstance(custom_field_value, str):
                event.update({custom_field_name: custom_field_value})
            elif isinstance(custom_field_value, list):
                custom_field_value = ','.join(custom_field_value)
                event.update({custom_field_name: custom_field_value})

        payload = {}
        payload.update({'host': fqdn})
        payload.update({'index': opts['index']})
        payload.update({'sourcetype': opts['sourcetype']})

        # Potentially add metadata fields:
        fields = {}
        for item in index_extracted_fields:
            if item in event and not isinstance(event[item], (list, dict, tuple)):
                fields["meta_%s" % item] = str(event[item])
        if fields:
            payload.update({'fields': fields})

        self.endpoint_list.append((hec, event, payload))
def returner(ret):
    try:
        opts_list = get_splunk_options(sourcetype='hubble_fdg',
                                       sourcetype_fdg='hubble_fdg',
                                       add_query_to_sourcetype=True,
                                       _nick={'sourcetype_fdg': 'sourcetype'})
        for opts in opts_list:
            log.debug('Options: %s', json.dumps(opts))
            custom_fields = opts['custom_fields']
            # Set up the fields to be extracted at index time. The field values must be strings.
            # Note that these fields will also still be available in the event data
            index_extracted_fields = []
            try:
                index_extracted_fields.extend(
                    __opts__.get('splunk_index_extracted_fields', []))
            except TypeError:
                pass
            args, kwargs = make_hec_args(opts)
            hec = http_event_collector(*args, **kwargs)
            data = ret['return']
            minion_id = ret['id']
            jid = ret['jid']
            fun = ret['fun']
            global RETRY
            RETRY = ret['retry']
            master = __grains__['master']
            fqdn = __grains__['fqdn']
            # Sometimes fqdn is blank. If it is, replace it with minion_id
            fqdn = fqdn if fqdn else minion_id
            try:
                fqdn_ip4 = __grains__.get('local_ip4')
                if not fqdn_ip4:
                    fqdn_ip4 = __grains__['fqdn_ip4'][0]
            except IndexError:
                try:
                    fqdn_ip4 = __grains__['ipv4'][0]
                except IndexError:
                    raise Exception('No ipv4 grains found. Is net-tools installed?')
            if fqdn_ip4.startswith('127.'):
                for ip4_addr in __grains__['ipv4']:
                    if ip4_addr and not ip4_addr.startswith('127.'):
                        fqdn_ip4 = ip4_addr
                        break
            local_fqdn = __grains__.get('local_fqdn', __grains__['fqdn'])
            # Sometimes fqdn reports a value of localhost. If that happens, try another method.
            bad_fqdns = ['localhost', 'localhost.localdomain', 'localhost6.localdomain6']
            if fqdn in bad_fqdns:
                new_fqdn = socket.gethostname()
                if '.' not in new_fqdn or new_fqdn in bad_fqdns:
                    new_fqdn = fqdn_ip4
                fqdn = new_fqdn
            # Get cloud details
            cloud_details = __grains__.get('cloud_details', {})
            if not data:
                return
            if fun != 'fdg.top':
                if len(data) < 2:
                    log.error('Non-fdg data found in splunk_fdg_return: %s', data)
                    return
                data = {data[0]: data[1]}
            for fdg_info, fdg_results in data.items():
                fdg_file, starting_chained = fdg_info
                fdg_file = fdg_file.lower().replace(' ', '_')
                if not isinstance(fdg_results, list):
                    fdg_results = [fdg_results]
                for fdg_result in fdg_results:
                    event = {}
                    payload = {}
                    event.update({'fdg_result': fdg_result[0]})
                    event.update({'fdg_status': fdg_result[1]})
                    event.update({'fdg_file': fdg_file})
                    event.update({'fdg_starting_chained': starting_chained})
                    event.update({'job_id': jid})
                    event.update({'master': master})
                    event.update({'minion_id': minion_id})
                    event.update({'dest_host': fqdn})
                    event.update({'dest_ip': fqdn_ip4})
                    event.update({'dest_fqdn': local_fqdn})
                    event.update({'system_uuid': __grains__.get('system_uuid')})
                    event.update(cloud_details)
                    for custom_field in custom_fields:
                        custom_field_name = 'custom_' + custom_field
                        custom_field_value = __salt__['config.get'](custom_field, '')
                        if isinstance(custom_field_value, str):
                            event.update({custom_field_name: custom_field_value})
                        elif isinstance(custom_field_value, list):
                            custom_field_value = ','.join(custom_field_value)
                            event.update({custom_field_name: custom_field_value})
                    payload.update({'host': fqdn})
                    payload.update({'index': opts['index']})
                    if opts['add_query_to_sourcetype']:
                        payload.update({'sourcetype': "%s_%s" % (opts['sourcetype'], fdg_file)})
                    else:
                        payload.update({'sourcetype': opts['sourcetype']})
                    # Remove any empty fields from the event payload
                    remove_keys = [k for k in event
                                   if event[k] == "" and not k.startswith('fdg_')]
                    for k in remove_keys:
                        del event[k]
                    payload.update({'event': event})
                    # Potentially add metadata fields:
                    fields = {}
                    for item in index_extracted_fields:
                        if item in payload['event'] and not isinstance(
                                payload['event'][item], (list, dict, tuple)):
                            fields["meta_%s" % item] = str(payload['event'][item])
                    if fields:
                        payload.update({'fields': fields})
                    hec.batchEvent(payload)
            hec.flushBatch()
    except Exception:
        log.exception('Error occurred in splunk_fdg_return')
    return
def returner(ret):
    try:
        if isinstance(ret, dict) and not ret.get('return'):
            # Empty single return, let's not do any setup or anything
            return
        opts_list = get_splunk_options(sourcetype_pulsar='hubble_fim',
                                       _nick={'sourcetype_pulsar': 'sourcetype'})
        for opts in opts_list:
            log.debug('Options: %s', json.dumps(opts))
            custom_fields = opts['custom_fields']
            # Set up the fields to be extracted at index time. The field values must be strings.
            # Note that these fields will also still be available in the event data
            index_extracted_fields = []
            try:
                index_extracted_fields.extend(
                    __opts__.get('splunk_index_extracted_fields', []))
            except TypeError:
                pass
            # Set up the collector
            args, kwargs = make_hec_args(opts)
            hec = http_event_collector(*args, **kwargs)
            # Check whether or not data is batched:
            if isinstance(ret, dict):  # Batching is disabled
                data = [ret]
            else:
                data = ret
            # Sometimes there are duplicate events in the list. Dedup them:
            data = _dedupList(data)
            minion_id = __opts__['id']
            jid = ret['jid']
            global RETRY
            RETRY = ret['retry']
            fqdn = __grains__['fqdn']
            # Sometimes fqdn is blank. If it is, replace it with minion_id
            fqdn = fqdn if fqdn else minion_id
            master = __grains__['master']
            try:
                fqdn_ip4 = __grains__.get('local_ip4')
                if not fqdn_ip4:
                    fqdn_ip4 = __grains__['fqdn_ip4'][0]
            except IndexError:
                try:
                    fqdn_ip4 = __grains__['ipv4'][0]
                except IndexError:
                    raise Exception('No ipv4 grains found. Is net-tools installed?')
            if fqdn_ip4.startswith('127.'):
                for ip4_addr in __grains__['ipv4']:
                    if ip4_addr and not ip4_addr.startswith('127.'):
                        fqdn_ip4 = ip4_addr
                        break
            local_fqdn = __grains__.get('local_fqdn', __grains__['fqdn'])
            # Sometimes fqdn reports a value of localhost. If that happens, try another method.
            bad_fqdns = ['localhost', 'localhost.localdomain', 'localhost6.localdomain6']
            if fqdn in bad_fqdns:
                new_fqdn = socket.gethostname()
                if '.' not in new_fqdn or new_fqdn in bad_fqdns:
                    new_fqdn = fqdn_ip4
                fqdn = new_fqdn
            # Get cloud details
            cloud_details = __grains__.get('cloud_details', {})
            alerts = []
            for item in data:
                events = item['return']
                if not isinstance(events, list):
                    events = [events]
                alerts.extend(events)
            for alert in alerts:
                event = {}
                payload = {}
                if 'change' in alert:  # Linux, normal pulsar
                    # The second half of the change will be '|IN_ISDIR' for directories
                    change = alert['change'].split('|')[0]
                    # Skip the IN_IGNORED events
                    if change == 'IN_IGNORED':
                        continue
                    if len(alert['change'].split('|')) == 2:
                        object_type = 'directory'
                    else:
                        object_type = 'file'

                    actions = defaultdict(lambda: 'unknown')
                    actions['IN_ACCESS'] = 'read'
                    actions['IN_ATTRIB'] = 'acl_modified'
                    actions['IN_CLOSE_NOWRITE'] = 'read'
                    actions['IN_CLOSE_WRITE'] = 'read'
                    actions['IN_CREATE'] = 'created'
                    actions['IN_DELETE'] = 'deleted'
                    actions['IN_DELETE_SELF'] = 'deleted'
                    actions['IN_MODIFY'] = 'modified'
                    actions['IN_MOVE_SELF'] = 'modified'
                    actions['IN_MOVED_FROM'] = 'modified'
                    actions['IN_MOVED_TO'] = 'modified'
                    actions['IN_OPEN'] = 'read'
                    actions['IN_MOVE'] = 'modified'
                    actions['IN_CLOSE'] = 'read'

                    event['action'] = actions[change]
                    event['change_type'] = 'filesystem'
                    event['object_category'] = object_type
                    event['object_path'] = alert['path']
                    event['file_name'] = alert['name']
                    event['file_path'] = alert['tag']
                    event['pulsar_config'] = alert['pulsar_config']
                    if 'contents' in alert:
                        event['contents'] = alert['contents']

                    if alert['stats']:  # Gather more data if the change wasn't a delete
                        stats = alert['stats']
                        event['object_id'] = stats['inode']
                        event['file_acl'] = stats['mode']
                        event['file_create_time'] = stats['ctime']
                        event['file_modify_time'] = stats['mtime']
                        event['file_size'] = stats['size'] / 1024.0  # Convert bytes to kilobytes
                        event['user'] = stats['user']
                        event['group'] = stats['group']

                    if object_type == 'file':
                        chk = alert.get('checksum')
                        if chk:
                            event['file_hash'] = chk
                            event['file_hash_type'] = alert.get('checksum_type', 'unknown')
                else:  # Windows, win_pulsar
                    if alert.get('Accesses', None):
                        change = alert['Accesses']
                        if alert['Hash'] == 'Item is a directory':
                            object_type = 'directory'
                        else:
                            object_type = 'file'
                    else:
                        change = alert['Reason']
                        object_type = 'file'

                    actions = defaultdict(lambda: 'unknown')
                    actions['Delete'] = 'deleted'
                    actions['Read Control'] = 'read'
                    actions['Write DAC'] = 'acl_modified'
                    actions['Write Owner'] = 'modified'
                    actions['Synchronize'] = 'modified'
                    actions['Access Sys Sec'] = 'read'
                    actions['Read Data'] = 'read'
                    actions['Write Data'] = 'modified'
                    actions['Append Data'] = 'modified'
                    actions['Read EA'] = 'read'
                    actions['Write EA'] = 'modified'
                    actions['Execute/Traverse'] = 'read'
                    actions['Read Attributes'] = 'read'
                    actions['Write Attributes'] = 'acl_modified'
                    actions['Query Key Value'] = 'read'
                    actions['Set Key Value'] = 'modified'
                    actions['Create Sub Key'] = 'created'
                    actions['Enumerate Sub-Keys'] = 'read'
                    actions['Notify About Changes to Keys'] = 'read'
                    actions['Create Link'] = 'created'
                    actions['Print'] = 'read'
                    actions['Basic info change'] = 'modified'
                    actions['Compression change'] = 'modified'
                    actions['Data extend'] = 'modified'
                    actions['EA change'] = 'modified'
                    actions['File create'] = 'created'
                    actions['File delete'] = 'deleted'

                    if alert.get('Accesses', None):
                        event['action'] = actions[change]
                        event['change_type'] = 'filesystem'
                        event['object_category'] = object_type
                        event['object_path'] = alert['Object Name']
                        event['file_name'] = os.path.basename(alert['Object Name'])
                        event['file_path'] = os.path.dirname(alert['Object Name'])
                        event['pulsar_config'] = alert['pulsar_config']
                        # TODO: Should we be reporting 'EntryType' or 'TimeGenerated'?
                        # EntryType reports whether the attempt to change was successful.
                    else:
                        for c in change:
                            if not event.get('action', None):
                                event['action'] = actions.get(c, c)
                            else:
                                event['action'] += ', ' + actions.get(c, c)
                        event['change_type'] = 'filesystem'
                        event['object_category'] = object_type
                        event['object_path'] = alert['Full path']
                        event['file_name'] = alert['File name']
                        event['file_path'] = alert['tag']
                        event['pulsar_config'] = alert.get('pulsar_config',
                                                           'hubblestack_pulsar_win_config.yaml')
                        event['TimeGenerated'] = alert['Time stamp']
                        chk = alert.get('checksum')
                        if chk:
                            event['file_hash'] = chk
                            event['file_hash_type'] = alert.get('checksum_type', 'unknown')
                event.update({'master': master})
                event.update({'minion_id': minion_id})
                event.update({'dest_host': fqdn})
                event.update({'dest_ip': fqdn_ip4})
                event.update({'dest_fqdn': local_fqdn})
                event.update({'system_uuid': __grains__.get('system_uuid')})
                event.update(cloud_details)
                for custom_field in custom_fields:
                    custom_field_name = 'custom_' + custom_field
                    custom_field_value = __salt__['config.get'](custom_field, '')
                    if isinstance(custom_field_value, str):
                        event.update({custom_field_name: custom_field_value})
                    elif isinstance(custom_field_value, list):
                        custom_field_value = ','.join(custom_field_value)
                        event.update({custom_field_name: custom_field_value})
                payload.update({'host': fqdn})
                payload.update({'index': opts['index']})
                payload.update({'sourcetype': opts['sourcetype']})
                # Remove any empty fields from the event payload
                remove_keys = [k for k in event if event[k] == ""]
                for k in remove_keys:
                    del event[k]
                payload.update({'event': event})
                # Potentially add metadata fields:
                fields = {}
                for item in index_extracted_fields:
                    if item in payload['event'] and not isinstance(
                            payload['event'][item], (list, dict, tuple)):
                        fields["meta_%s" % item] = str(payload['event'][item])
                if fields:
                    payload.update({'fields': fields})
                hec.batchEvent(payload)
            hec.flushBatch()
    except Exception:
        log.exception('Error occurred in splunk_pulsar_return')
    return
def returner(ret):
    try:
        opts_list = get_splunk_options(sourcetype_osqueryd='hubble_osqueryd',
                                       add_query_to_sourcetype=True,
                                       _nick={'sourcetype_osqueryd': 'sourcetype'})
        for opts in opts_list:
            log.debug('Options: %s', json.dumps(opts))
            custom_fields = opts['custom_fields']
            # Set up the fields to be extracted at index time. The field values must be strings.
            # Note that these fields will also still be available in the event data
            index_extracted_fields = []
            try:
                index_extracted_fields.extend(
                    __opts__.get('splunk_index_extracted_fields', []))
            except TypeError:
                pass
            # Set up the collector
            args, kwargs = make_hec_args(opts)
            hec = http_event_collector(*args, **kwargs)
            data = ret['return']
            minion_id = ret['id']
            jid = ret['jid']
            global RETRY
            RETRY = ret['retry']
            master = __grains__['master']
            fqdn = __grains__['fqdn']
            # Sometimes fqdn is blank. If it is, replace it with minion_id
            fqdn = fqdn if fqdn else minion_id
            try:
                fqdn_ip4 = __grains__.get('local_ip4')
                if not fqdn_ip4:
                    fqdn_ip4 = __grains__['fqdn_ip4'][0]
            except IndexError:
                try:
                    fqdn_ip4 = __grains__['ipv4'][0]
                except IndexError:
                    raise Exception('No ipv4 grains found. Is net-tools installed?')
            if fqdn_ip4.startswith('127.'):
                for ip4_addr in __grains__['ipv4']:
                    if ip4_addr and not ip4_addr.startswith('127.'):
                        fqdn_ip4 = ip4_addr
                        break
            local_fqdn = __grains__.get('local_fqdn', __grains__['fqdn'])
            # Sometimes fqdn reports a value of localhost. If that happens, try another method.
            bad_fqdns = ['localhost', 'localhost.localdomain', 'localhost6.localdomain6']
            if fqdn in bad_fqdns:
                new_fqdn = socket.gethostname()
                if '.' not in new_fqdn or new_fqdn in bad_fqdns:
                    new_fqdn = fqdn_ip4
                fqdn = new_fqdn
            # Get cloud details
            cloud_details = __grains__.get('cloud_details', {})
            if not data:
                return
            for query_results in data:
                event = {}
                query_name = query_results['name']
                event.update({'query': query_name})
                event.update({'job_id': jid})
                event.update({'master': master})
                event.update({'minion_id': minion_id})
                event.update({'dest_host': fqdn})
                event.update({'dest_ip': fqdn_ip4})
                event.update({'dest_fqdn': local_fqdn})
                event.update({'system_uuid': __grains__.get('system_uuid')})
                event.update({'epoch': query_results['epoch']})
                event.update({'counter': query_results['counter']})
                event.update({'action': query_results['action']})
                event.update({'unixTime': query_results['unixTime']})
                event.update(cloud_details)
                sourcetype = opts['sourcetype']
                if opts['add_query_to_sourcetype']:
                    # Remove 'pack_' from query name to shorten the sourcetype length
                    sourcetype = opts['sourcetype'] + '_' + query_name.replace('pack_', '')
                # If the osquery query includes a field called 'time' it will be checked.
                # If it's within the last year, it will be used as the eventtime.
                event_time = query_results.get('time', '')
                if 'columns' in query_results:  # This means we have a result log event
                    event.update(query_results['columns'])
                    _generate_and_send_payload(hec, opts['index'], sourcetype, fqdn,
                                               custom_fields, index_extracted_fields,
                                               event, event_time)
                elif 'snapshot' in query_results:  # This means we have a snapshot log event
                    for q_result in query_results['snapshot']:
                        n_event = copy.deepcopy(event)
                        n_event.update(q_result)
                        _generate_and_send_payload(hec, opts['index'], sourcetype, fqdn,
                                                   custom_fields, index_extracted_fields,
                                                   n_event, event_time)
                else:
                    log.error("Incompatible event data captured")
            hec.flushBatch()
    except Exception:
        log.exception('Error occurred in splunk_osqueryd_return')
    return
def returner(ret):
    try:
        opts_list = get_splunk_options(sourcetype_nebula='hubble_osquery',
                                       add_query_to_sourcetype=True,
                                       _nick={'sourcetype_nebula': 'sourcetype'})
        for opts in opts_list:
            log.debug('Options: %s', json.dumps(opts))
            custom_fields = opts['custom_fields']
            # Set up the fields to be extracted at index time. The field values must be strings.
            # Note that these fields will also still be available in the event data
            index_extracted_fields = []
            try:
                index_extracted_fields.extend(
                    __opts__.get('splunk_index_extracted_fields', []))
            except TypeError:
                pass
            # Set up the collector
            args, kwargs = make_hec_args(opts)
            hec = http_event_collector(*args, **kwargs)
            data = ret['return']
            minion_id = ret['id']
            jid = ret['jid']
            global RETRY
            RETRY = ret['retry']
            master = __grains__['master']
            fqdn = __grains__['fqdn']
            # Sometimes fqdn is blank. If it is, replace it with minion_id
            fqdn = fqdn if fqdn else minion_id
            try:
                fqdn_ip4 = __grains__.get('local_ip4')
                if not fqdn_ip4:
                    fqdn_ip4 = __grains__['fqdn_ip4'][0]
            except IndexError:
                try:
                    fqdn_ip4 = __grains__['ipv4'][0]
                except IndexError:
                    raise Exception('No ipv4 grains found. Is net-tools installed?')
            if fqdn_ip4.startswith('127.'):
                for ip4_addr in __grains__['ipv4']:
                    if ip4_addr and not ip4_addr.startswith('127.'):
                        fqdn_ip4 = ip4_addr
                        break
            local_fqdn = __grains__.get('local_fqdn', __grains__['fqdn'])
            # Sometimes fqdn reports a value of localhost. If that happens, try another method.
            bad_fqdns = ['localhost', 'localhost.localdomain', 'localhost6.localdomain6']
            if fqdn in bad_fqdns:
                new_fqdn = socket.gethostname()
                if '.' not in new_fqdn or new_fqdn in bad_fqdns:
                    new_fqdn = fqdn_ip4
                fqdn = new_fqdn
            # Get cloud details
            cloud_details = __grains__.get('cloud_details', {})
            if not data:
                return
            for query in data:
                for query_name, query_results in query.items():
                    if 'data' not in query_results:
                        query_results['data'] = [{'error': 'result missing'}]
                    for query_result in query_results['data']:
                        event = {}
                        payload = {}
                        event.update(query_result)
                        event.update({'query': query_name})
                        event.update({'job_id': jid})
                        event.update({'master': master})
                        event.update({'minion_id': minion_id})
                        event.update({'dest_host': fqdn})
                        event.update({'dest_ip': fqdn_ip4})
                        event.update({'dest_fqdn': local_fqdn})
                        event.update({'system_uuid': __grains__.get('system_uuid')})
                        event.update(cloud_details)
                        for custom_field in custom_fields:
                            custom_field_name = 'custom_' + custom_field
                            custom_field_value = __salt__['config.get'](custom_field, '')
                            if isinstance(custom_field_value, str):
                                event.update({custom_field_name: custom_field_value})
                            elif isinstance(custom_field_value, list):
                                custom_field_value = ','.join(custom_field_value)
                                event.update({custom_field_name: custom_field_value})
                        payload.update({'host': fqdn})
                        payload.update({'index': opts['index']})
                        if opts['add_query_to_sourcetype']:
                            payload.update({'sourcetype': "%s_%s" % (opts['sourcetype'], query_name)})
                        else:
                            payload.update({'sourcetype': opts['sourcetype']})
                        # Remove any empty fields from the event payload
                        remove_keys = [k for k in event if event[k] == ""]
                        for k in remove_keys:
                            del event[k]
                        payload.update({'event': event})
                        # Potentially add metadata fields:
                        fields = {}
                        for item in index_extracted_fields:
                            if item in payload['event'] and not isinstance(
                                    payload['event'][item], (list, dict, tuple)):
                                fields["meta_%s" % item] = str(payload['event'][item])
                        if fields:
                            payload.update({'fields': fields})
                        # If the osquery query includes a field called 'time' it will be checked.
                        # If it's within the last year, it will be used as the eventtime.
                        event_time = query_result.get('time', '')
                        try:
                            if (datetime.fromtimestamp(time.time()) -
                                    datetime.fromtimestamp(float(event_time))).days > 365:
                                event_time = ''
                        except Exception:
                            event_time = ''
                        finally:
                            hec.batchEvent(payload, eventtime=event_time)
            hec.flushBatch()
    except Exception:
        log.exception('Error occurred in splunk_nebula_return')
    return