def main():
    client = actconfig.setup()

    # Parse arguments
    opts, kwargs = Intersplunk.getKeywordsAndOptions()

    if not opts:
        Intersplunk.generateErrorResults(
            "Usage: | actadd <field1> ... <fieldN> "
            "[fact_type=<fact type>] [fact_value=<fact value>]")
        return

    events, _, _ = Intersplunk.getOrganizedResults()

    # Annotate events
    for event in events:
        object_value = []
        for field in opts:
            if event.get(field):
                object_value.append(event[field])

        if not object_value:
            continue

        event.update(fact_search(client, object_value, **kwargs))

    Intersplunk.outputResults(events)
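# Example SPL usage, sketched from the usage string above (the field and
# option values are illustrative, not taken from this source):
#
#   ... | actadd src_ip query_domain fact_type=resolvesTo
#
# Each listed field found on an event is collected into object_value, and
# fact_search() is expected to return a dict of new fields that gets merged
# into the event before output.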
def run(results, fields):
    try:
        values = set()
        for result in results:
            field = None
            for f, v in result.items():
                if f not in ['count', 'percent']:
                    field = f
                    break
            else:
                continue  # no usable field on this result; skip it
            value = result[field]
            if value.lower() == "other":
                value = ' '.join(['NOT %s="%s" ' % (field, v.replace('"', '\\"'))
                                  for v in values]) + ' %s=*' % field
            elif value.lower() == "null":
                value = 'NOT %s=*' % field
            else:
                values.add(value)
                # use the field's own value; the original's `v` was a
                # leftover loop variable that only worked by coincidence
                value = '%s="%s"' % (field, value.replace('"', '\\"'))
            result['_drilldown'] = value
        if '_drilldown' not in fields:
            fields.append('_drilldown')
        si.outputResults(results, {}, fields=fields)
    except Exception, e2:
        stack2 = traceback.format_exc()
        si.generateErrorResults("Error '%s'. %s" % (e2, stack2))
def parseSpan(span):
    # maxspan = [<integer>]s|m|h|d|w|q|y
    match = re.search("(\d*)([shdwmqy])", span)
    if match == None:
        si.generateErrorResults(
            " 'timeunit' argument required, such as s (seconds), h (hours), "
            "d (days), w (weeks), y (years). Optionally prefix with a number: "
            "600s (10 minutes), 2w (2 weeks).")
        exit(-1)
    scalar, units = match.groups()
    if len(scalar) == 0:
        scalar = 1
    secs = scalar = int(scalar)
    if units == "s":
        pass
    elif units == "h":
        secs *= 60 * 60
    elif units == "d":
        secs *= 24 * 60 * 60
    elif units == "w":
        secs *= 7 * 24 * 60 * 60
    elif units == "m":
        secs *= 30 * 24 * 60 * 60
    elif units == "q":
        secs *= 365 / 4. * 24 * 60 * 60
    elif units == "y":
        secs *= 365 * 24 * 60 * 60
    else:
        return None, None, None
    return secs, scalar, units
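# A quick worked example of parseSpan, computed from the unit table above:
#
#   parseSpan("2w")  -> (1209600, 2, 'w')   # 2 * 7 * 24 * 60 * 60 seconds
#   parseSpan("d")   -> (86400, 1, 'd')     # scalar defaults to 1
#   parseSpan("10x") -> error results       # 'x' never matches the regex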
def run(spantext, seriesmode, results):
    try:
        secsPerSpan, scalar, unit = parseSpan(spantext)
        maxtime = -1
        time_data = {}
        fields_seen = {}
        span = None
        latest = None
        # for each result
        for result in results:
            if maxtime < 0:
                try:
                    maxtime = int(float(result['info_max_time']))
                except:
                    maxtime = int(time.time())
                maxtime -= 1  # not inclusive
            if '_time' not in result:
                raise Exception("Missing required _time field on data")
            if span == None and '_span' in result:
                span = result['_span']
            mytime = int(float(result['_time']))
            spansago = int((maxtime - mytime) / secsPerSpan)
            new_time = mytime + (spansago * secsPerSpan)
            if new_time not in time_data:
                time_data[new_time] = {'_time': new_time, '_span': span}
            this_row = time_data[new_time]
            spanstart = maxtime - ((spansago + 1) * secsPerSpan) + 1
            # the original referenced an undefined `series_mode` here;
            # the function parameter is `seriesmode`
            series = seriesName(seriesmode, scalar, spansago, unit, spanstart)
            if spansago == 0:
                latest = series
            acount = len(result)
            for k, v in result.items():
                if k not in ['_time', 'info_sid', 'info_max_time',
                             'info_min_time', 'info_search_time', '_span']:
                    if k == 'count':
                        attr = series
                    else:
                        attr = '%s_%s' % (k, series)
                    this_row[attr] = result[k]
                    fields_seen[attr] = spansago
        field_order = fields_seen.items()
        field_order.sort(lambda x, y: cmp(x[1], y[1]))
        field_order = [f for f, v in field_order]
        field_order.insert(0, '_time')
        field_order.append('_span')
        results = time_data.values()
        results.sort(lambda x, y: cmp(x['_time'], y['_time']))
        si.outputResults(results, {}, fields=field_order)
    except Exception, e2:
        stack2 = traceback.format_exc()
        si.generateErrorResults("Error '%s'. %s" % (e2, stack2))
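# A minimal worked example (illustrative values only) of the bucketing in
# run() above: each event is shifted forward by a whole number of spans so
# that corresponding periods stack onto the same _time.
secsPerSpan = 7 * 24 * 60 * 60                    # one week per span
maxtime = 1600000000                              # info_max_time of the search
mytime = maxtime - 10 * 24 * 60 * 60              # an event from 10 days ago
spansago = int((maxtime - mytime) / secsPerSpan)  # -> 1 (one full week back)
new_time = mytime + spansago * secsPerSpan        # lands inside the current week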
def main():
    # get config from config file
    config = ConfigParser.ConfigParser()
    if os.path.exists(os.path.join('..', 'local', 'slack.conf')):
        config.readfp(open(os.path.join('..', 'local', 'slack.conf')))
    else:
        config.readfp(open(os.path.join('..', 'default', 'slack.conf')))

    # username and icon can only be set by conf
    username = config.get('config', 'username')
    icon = config.get('config', 'icon')

    # update args if the user specifies them in the search
    channel = kwargs.get('channel', config.get('config', 'channel'))
    if not channel.startswith('#'):
        channel = '#' + channel
    if config.get('config', 'allow_user_set_slack_url').lower() in TRUE_VALUES:
        url = kwargs.get('url', config.get('config', 'url'))
    else:
        url = config.get('config', 'url')

    # no url specified, don't proceed
    if not url:
        raise Exception("No slack url specified!")

    # read search results
    results = sis.readResults(None, None, True)

    https_proxy = config.get('config', 'proxy')
    proxyDict = {"https": https_proxy}

    # prepare data to be sent to slack
    data = {
        'text': get_pretty_table(results),
        'username': username,
        'channel': channel,
        'icon_url': icon,
        'mrkdwn': True,
    }

    # send data to slack
    if https_proxy != "":
        r = requests.post(url, data=json.dumps(data), proxies=proxyDict)
    else:
        r = requests.post(url, data=json.dumps(data))

    if r.status_code == 200:
        sis.outputResults(results)
    else:
        err_msg = ("Error sending results to slack, reason: {r}, {t}".format(
            r=r.reason, t=r.text))
        sis.generateErrorResults(err_msg)
def main():
    # get config from config file
    config = ConfigParser.ConfigParser()
    config.readfp(open(os.path.join('..', 'default', 'hipchat.conf')))

    # update args if the user specifies them in the search
    room = kwargs.get('room', config.get('default', 'room'))
    color = kwargs.get('color', config.get('default', 'color'))
    notify = kwargs.get('notify', config.get('default', 'notify'))
    msg_fmt = kwargs.get('message_format',
                         config.get('default', 'message_format'))
    if config.get('default', 'allow_users_set_base_url').lower() in TRUE_VALUES:
        base_url = kwargs.get('base_url', config.get('default', 'base_url'))
    else:
        base_url = config.get('default', 'base_url')

    # check if auth token is set properly
    try:
        auth_token = {"auth_token": config.get(room, 'auth_token')}
    except ConfigParser.NoSectionError as e:
        raise Exception("Room not set, please set the room stanza")
    except ConfigParser.NoOptionError as e:
        raise Exception("Auth token not set, please set auth token for room")

    # construct url
    url = base_url + "{s}{r}/notification".format(
        s='' if base_url.endswith('/') else '/', r=room)

    # read search results
    results = sis.readResults(None, None, True)

    # prepare data to be sent
    data = {
        'message': get_pretty_table(results, msg_fmt),
        'message_format': msg_fmt,
        'color': color,
        'notify': notify.lower() in TRUE_VALUES
    }

    # send data
    headers = {'Content-type': 'application/json'}
    r = requests.post(url, data=json.dumps(data), params=auth_token,
                      headers=headers)
    if r.status_code == 204:
        sis.outputResults(results)
    else:
        # the original message said "slack", but this command posts to HipChat
        err_msg = ("Error sending results to HipChat, reason: {r}, {t}".format(
            r=r.reason, t=r.text))
        sis.generateErrorResults(err_msg)
def main():
    bsmProc = BSMProcessor()
    optlist = None
    try:
        optlist, args = getopt.getopt(sys.argv[1:], '?',
                                      ['noCache=', 'filter='])
        bsmProc.initFromOptlist(optlist)
    except getopt.error, val:
        print str(val)  # tell them what was wrong
        bsmProc.usage()
        si.generateErrorResults("Incorrect usage...")
def getRanges(options):
    ranges = {}
    for name, startend in options.items():
        if name in ['field', 'default']:
            continue
        try:
            start, end = re.match("(-?\d+)-(-?\d+)", startend).groups()
            ranges[name] = (float(start), float(end))
        except:
            si.generateErrorResults(
                "Invalid range: '%s'. '<start_num>-<end_num>' expected."
                % startend)
            exit(0)
    return ranges
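# Example: with options parsed from a search like
# `... | <command> field=y green=0-20 yellow=21-80 default=black`
# (the command name is illustrative), getRanges returns:
#
#   {'green': (0.0, 20.0), 'yellow': (21.0, 80.0)}
#
# 'field' and 'default' are skipped, and any value not shaped like
# '<start_num>-<end_num>' aborts with an error result.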
def main():
    try:
        messages = {}

        keywords, options = si.getKeywordsAndOptions()
        DEFAULT_MAX_TYPES = 10
        maxtypes = options.get('max', str(DEFAULT_MAX_TYPES))
        error = None
        if not maxtypes.isdigit():
            error = 'max must be an integer between 1-%s.' % MAXRESULTS
        else:
            maxtypes = int(maxtypes)
            if not (0 < maxtypes <= MAXRESULTS):
                error = 'max must be an integer between 1-%s.' % MAXRESULTS
        if error:
            si.generateErrorResults(error)
            return

        ignore_covered = 'notcovered' in keywords
        useraw = 'useraw' in keywords
        results, dummyresults, settings = si.getOrganizedResults()
        #for r in results:
        #    for attr in r:
        #        print attr, r[attr], len(r[attr])
        if len(results) > MAXRESULTS:
            results = results[:MAXRESULTS]
            si.addWarnMessage(
                messages,
                "For performance reasons, the maximum number of results used "
                "to discover event types was capped at %s. Consider a more "
                "restrictive search." % MAXRESULTS)

        argc = len(sys.argv)
        argv = sys.argv

        sessionKey = settings.get("sessionKey", None)
        owner = settings.get("owner", None)
        namespace = settings.get("namespace", None)

        searchhead = ''
        try:
            searches = sutils.getCommands(settings.get("search", ''), None)
            firstcmd = searches[0][0][0]
            firstarg = searches[0][0][1].strip()
            if firstcmd == 'search' and firstarg != '*':
                searchhead = firstarg
        except Exception, e:
            pass

        results = discover(results, searchhead, maxtypes, ignore_covered,
                           useraw)
        if len(results) == 0:
            si.addWarnMessage(messages,
                              "Unable to isolate useful groups of events.")
def main():
    try:
        output_fields = ['_time']
        output_results = []
        search_results, dummyresults, settings = \
            intersplunk.getOrganizedResults()
        if search_results is None or len(search_results) == 0:
            intersplunk.outputResults(output_results, fields=output_fields)
            return

        fields = search_results[0].keys()
        is_field_valid, is_detection_needed = check_fields(fields)
        if not is_field_valid:
            intersplunk.parseError(
                'This visualization requires timestamped, evenly spaced '
                'numeric time-series data. Try using the timechart command '
                'in your query.')
        if not is_detection_needed:
            intersplunk.outputResults(search_results,
                                      fields=search_results[0].keys())
            return

        output_results, output_fields = wrap_anomaly_detection(search_results)
        intersplunk.outputResults(output_results, fields=output_fields)
    except:
        stack = traceback.format_exc()
        results = intersplunk.generateErrorResults("Error : Traceback: " +
                                                   str(stack))
        intersplunk.outputResults(results)
def main():
    try:
        search_results, dummy_results, settings = \
            intersplunk.getOrganizedResults()
        if len(search_results) > 0:
            output_results = cal_utilization(search_results)
            intersplunk.outputResults(output_results,
                                      fields=output_results[0].keys())
    except:
        stack = traceback.format_exc()
        results = intersplunk.generateErrorResults("Error : Traceback: " +
                                                   str(stack))
        intersplunk.outputResults(results)
def initFromOptlist(self, optlist):
    # First read settings in config.ini, if it exists...
    self.readConfig()
    # ...now, for debugging and backward compat, allow command line
    # settings to override...
    self.readOptlist(optlist)

    if self.debug:
        keys = self.__dict__.keys()
        keys.sort()
        for k in keys:
            if k.startswith("_"):
                continue
            print k + "=" + str(self.__dict__[k])

    # check min required args
    if self.prFlags == "":
        self.usage()
        #sys.exit()
        si.generateErrorResults("Too few arguments")
def main(dist, env):
    pm_config, config = load_pyden_config()
    pyden_location = pm_config.get('appsettings', 'location')
    if dist:
        if dist in config.sections():
            write_pyden_config(pyden_location, config, "default-pys",
                               "distribution", dist)
        else:
            Intersplunk.generateErrorResults(
                "The Python version %s is not installed yet." % dist)
            sys.exit(1)
    if env:
        if env in config.sections():
            write_pyden_config(pyden_location, config, "default-pys",
                               "environment", env)
        else:
            Intersplunk.generateErrorResults(
                "The virtual environment %s does not exist." % env)
            sys.exit(1)
    Intersplunk.outputResults([{"message": "Successfully changed defaults"}])
def getArgs():
    badcounts = False
    try:
        maxcount = int(options.get('maxcount', '20'))
        if maxcount <= 0:
            badcounts = True
    except:
        badcounts = True
    if badcounts:
        si.generateErrorResults(
            "Error: invalid required 'maxcount' (1-INF) setting.")
        exit()

    sizefield = options.get('sizefield', 'totalCount')
    pathfield = options.get('pathfield', 'source')
    if sizefield == None or pathfield == None:
        si.generateErrorResults(
            "Error: both pathfield and sizefield must be specified.")
        exit()

    countfield = options.get('countfield', 'count')
    delimiter = options.get('sep', os.sep)
    return maxcount, sizefield, pathfield, countfield, delimiter
def download_python(version, build_path):
    base_url = simpleRequest(
        "/servicesNS/nobody/pyden-manager/properties/pyden/download/url",
        sessionKey=session_key)[1]
    try:
        dpr = requests.get(base_url + "{0}/".format(version), proxies=proxies)
    except Exception as ex:
        Intersplunk.generateErrorResults(
            "Exception thrown getting python: ({0}, {1})".format(type(ex), ex))
        sys.exit(1)
    else:
        if dpr.status_code in range(200, 300):
            python_link = [
                link for link in re.findall("href=\"(.*?)\"", dpr.content)
                if link.endswith('tgz')
            ][0]
            dpr = requests.get(
                base_url + "{0}/{1}".format(version, python_link),
                proxies=proxies)
        else:
            Intersplunk.generateErrorResults(
                "Failed to reach www.python.org. Request returned - "
                "Status code: {0}, Response: {1}".format(dpr.status_code,
                                                         dpr.text))
            sys.exit(1)

    if dpr.status_code in range(200, 300):
        # save the archive into the build directory
        build_file = os.path.join(build_path, "Python-{0}.tgz".format(version))
        with open(build_file, "w") as download:
            download.write(dpr.content)
    else:
        Intersplunk.generateErrorResults(
            "Failed to download python. Request returned - "
            "Status code: {0}, Response: {1}".format(dpr.status_code,
                                                     dpr.text))
        sys.exit(1)
    return build_file
def return_results(module):
    try:
        results, dummy_results, settings = InterSplunk.getOrganizedResults()
        if isinstance(results, list) and len(results) > 0:
            new_results = module.process_iocs(results)
        elif len(sys.argv) > 1:
            new_results = module.process_iocs(None)
    except:
        stack = traceback.format_exc()
        new_results = InterSplunk.generateErrorResults("Error: " + str(stack))

    InterSplunk.outputResults(new_results)
    return
def main(): """ """ lookup_path = '/opt/splunk/etc/apps/osweep/lookups' file_path = '{}/urlhaus_url_feed.csv'.format(lookup_path) if sys.argv[1].lower() == 'feed': data_feed = urlhaus.get_feed() urlhaus.write_file(data_feed, file_path) exit(0) try: results, dummy_results, settings = InterSplunk.getOrganizedResults() if isinstance(results, list) and len(results) > 0: new_results = process_master(results) elif len(sys.argv) > 1: new_results = process_master(None) except: stack = traceback.format_exc() new_results = InterSplunk.generateErrorResults("Error: " + str(stack)) InterSplunk.outputResults(new_results) return
def main():
    try:
        search_results, dummyresults, settings = \
            intersplunk.getOrganizedResults()
        output_fields = ['_time', '_span']
        output_results = []
        if search_results is None or len(search_results) == 0:
            intersplunk.outputResults(output_results, fields=output_fields)
        else:
            fields = search_results[0].keys()
            detected_fields = list(
                filter(lambda x: x != '_time' and x != '_span', fields))
            search_results_length = range(len(search_results))
            timestamp = [
                int(str(search_results[i]['_time']))
                for i in search_results_length
            ]
            output_results = [{
                '_time': timestamp[i],
                '_span': search_results[i]['_span']
            } for i in search_results_length]
            for cur_field in detected_fields:
                data = [
                    str(search_results[i][cur_field])
                    for i in search_results_length
                ]
                if preprocess(data, timestamp, search_results_length,
                              output_results, cur_field):
                    output_fields.append(cur_field)
            intersplunk.outputResults(output_results, fields=output_fields)
    except:
        stack = traceback.format_exc()
        results = intersplunk.generateErrorResults("Error : Traceback: " +
                                                   str(stack))
        intersplunk.outputResults(results)
def jpath():
    try:
        keywords, options = si.getKeywordsAndOptions()
        legacy_args_fixer(options)
        defaultval = options.get('default', None)
        fn_input = options.get('input', options.get('field', '_raw'))
        fn_output = options.get('output', 'jpath')
        if len(keywords) != 1:
            si.generateErrorResults('Requires exactly one path argument.')
            sys.exit(0)
        path = keywords[0]

        # Handle literal (escaped) quotes. Presumably necessary because of raw args?
        path = path.replace(r'\"', '"')

        if "*" in fn_output:
            apply_output = output_to_wildcard
        else:
            apply_output = output_to_field

        try:
            jp = jmespath.compile(path)
        except ParseError as e:
            # Todo: Consider stripping off the last line " ^" pointing to the
            # issue. Not helpful since Splunk wraps the error message in a
            # really ugly way.
            si.generateErrorResults(
                "Invalid JMESPath expression '{}'. {}".format(path, e))
            sys.exit(0)

        results, dummyresults, settings = si.getOrganizedResults()
        # for each result
        for result in results:
            # get field value
            ojson = result.get(fn_input, None)
            added = False
            if ojson is not None:
                if isinstance(ojson, (list, tuple)):
                    # XXX: Add proper support for multivalue input fields.
                    # Just use the first value for now.
                    ojson = ojson[0]
                try:
                    json_obj = json.loads(ojson)
                except ValueError:
                    # Invalid JSON. Move on, nothing to see here.
                    continue
                try:
                    values = jp.search(json_obj, options=jp_options)
                    apply_output(values, fn_output, result)
                    result[ERROR_FIELD] = None
                    added = True
                except UnknownFunctionError as e:
                    # Can't detect invalid function names during the compile,
                    # but we want to treat these like syntax errors:
                    # stop processing immediately.
                    si.generateErrorResults(
                        "Issue with JMESPath expression. {}".format(e))
                    sys.exit(0)
                except JMESPathError as e:
                    # Not 100% sure I understand what these errors mean.
                    # Should they halt?
                    result[ERROR_FIELD] = "JMESPath error: {}".format(e)
                except Exception as e:
                    result[ERROR_FIELD] = "Exception: {}".format(e)
            if not added and defaultval is not None:
                result[fn_output] = defaultval

        si.outputResults(results)
    except Exception as e:
        import traceback
        stack = traceback.format_exc()
        si.generateErrorResults("Error '%s'. %s" % (e, stack))
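# A standalone sketch of what the compiled expression does per event above
# (assumes only the jmespath package; the JSON sample is illustrative):
import json
import jmespath

raw = '{"user": {"roles": ["admin", "power"]}}'
jp = jmespath.compile("user.roles[0]")   # same call the command makes
print(jp.search(json.loads(raw)))        # -> admin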
    results = sis.readResults(None, None, True)

    # prepare data to be sent
    data = {
        'message': get_pretty_table(results, msg_fmt),
        'message_format': msg_fmt,
        'color': color,
        'notify': notify.lower() in TRUE_VALUES
    }

    # send data
    headers = {'Content-type': 'application/json'}
    r = requests.post(url, data=json.dumps(data), params=auth_token,
                      headers=headers)
    if r.status_code == 204:
        sis.outputResults(results)
    else:
        # the original message said "slack", but this command posts to HipChat
        err_msg = ("Error sending results to HipChat, reason: {r}, {t}".format(
            r=r.reason, t=r.text))
        sis.generateErrorResults(err_msg)


try:
    main()
except Exception, e:
    import traceback
    stack = traceback.format_exc()
    sis.generateErrorResults("Error '{e}'".format(e=e))
#!/opt/splunk/bin/python
############################################################
#
# GET /agents/summary
#
############################################################

import sys
import splunk.Intersplunk as si
import requests
import json

try:
    request = requests.get(
        sys.argv[1] + "/en-US/custom/SplunkAppForWazuh/agents/summary?ip=" +
        sys.argv[2] + "&port=" + sys.argv[3] +
        "&user=" + "******" + "&pass=" + "******")
    # NOTE: the credentials and the response handling between the request
    # and the error handler were masked ("******") in the source; the
    # except clause below is reconstructed from the surviving fragment.
except Exception:
    import traceback
    stack = traceback.format_exc()
    data = si.generateErrorResults("Error : Traceback: " + str(stack))

si.outputResults(data)
def usage(): si.generateErrorResults("Usage: searchstats <field>") exit(0)
logger.info("erex run with examples: %s" % examples) if len(keywords) == 0: msg = "A required fieldname is missing" elif examples == None: msg = "Value for 'examples' is required" else: try: maxtrainers = int(maxtrainers) if maxtrainers < 1 or maxtrainers > 1000: raise Exception() except: msg = "Value for 'maxtrainers' must be an integer between 1-1000" if msg != None: si.generateErrorResults(msg) exit(0) messages = {} results, dummyresults, settings = si.getOrganizedResults() values = [] # for first N result used as training for result in results[:maxtrainers]: val = result.get(fromfield, None) if val != None: values.append(val) examples = [ex.strip() for ex in examples.split(",")] if badexamples == None: badexamples = [] else:
import json
from urllib import urlencode

from splunk import entity
import splunk.Intersplunk as si

SEARCH_PARSER_PATH = 'search'

if __name__ == '__main__':
    try:
        keywords, options = si.getKeywordsAndOptions()
        macro_field = options.get('macro_field', None)
        search_field = options.get('search_field', None)
        output_field = options.get('output_field', "new_field")
        if not macro_field:
            si.generateErrorResults('Requires macro_field field.')
            exit(0)
        if not search_field:
            si.generateErrorResults('Requires search_field field.')
            exit(0)

        results, dummyresults, settings = si.getOrganizedResults()
        sessionKey = settings.get("sessionKey", None)
        owner = "admin"
        for result in results:
            if result[macro_field] == "0":
                continue
            else:
                search = result.get(search_field, "")
                try:
def usage():
    si.generateErrorResults(
        " 'timeunit' argument required, such as s (seconds), h (hours), "
        "d (days), w (weeks), or y (years). Optionally prefix with a number: "
        "600s (10 minutes), 2w (2 weeks). Optionally add another argument to "
        "specify the time-range label: series=[short,exact,relative]")
    exit(-1)
if __name__ == '__main__':
    puppet_private_key = puppet_config('hostprivkey')
    puppet_client_cert = puppet_config('hostcert')
    puppet_master = puppet_config('inventory_server')
    puppet_master_port = puppet_config('inventory_port')

    conn = httplib.HTTPSConnection(puppet_master, puppet_master_port,
                                   key_file=puppet_private_key,
                                   cert_file=puppet_client_cert)
    conn.request('GET', '/production/facts_search/search', None,
                 {'Accept': 'pson'})
    resp = conn.getresponse()

    if resp.status == 200:
        puppet_hosts = json.loads(resp.read())
        puppet_hosts.sort()
        puppet_host_tab = [dict((('fqdn', fqdn),
                                 ('host', fqdn.split('.')[0])))
                           for fqdn in puppet_hosts]
        si.outputResults(puppet_host_tab)
    else:
        si.generateErrorResults("Error: Status '%d', Reason '%s'" %
                                (resp.status, resp.reason))
def getFileGroungs(results):
    try:
        maxcount, sizefield, pathfield, countfield, delimiter = getArgs()
        if len(results) <= maxcount:
            return results

        total = 0
        files = {}
        for result in results:
            total += int(result.get(sizefield, "0"))
            size = int(result.get(sizefield, "0"))
            path = result.get(pathfield, "")
            # FIX TRAILING DELIMITERS -- /foo/ -> /foo adding /foo/'s count to /foo
            if path != delimiter and path.endswith(delimiter):
                path = path[:-1]
            # FIX "" PATH TO BE "/"
            if path == "":
                path = delimiter
            files[path] = size

        pathobjs = []
        notCovered = []
        deleted = []

        # CREATE MAP OF PATH TO ALL FILES IN IT
        dirobjs = {}
        for path, size in files.items():
            notCovered.append(path)
            if path == delimiter:
                myparts = [delimiter]
            else:
                myparts = path.split(delimiter)
            mypath = ""
            lastpos = len(myparts) - 1
            for i, val in enumerate(myparts):
                mypath += val
                ellipse = ""
                isDir = False
                if i < lastpos:
                    mypath += delimiter
                    isDir = True
                pathinfo = dirobjs.get(mypath, {'count': 0, 'total': 0,
                                                'files': set()})
                pathinfo['isDir'] = isDir
                pathinfo['count'] += 1
                pathinfo['total'] += size
                pathinfo['files'].add(path)
                dirobjs[mypath] = pathinfo

        # SCORE EACH PATH
        pathsAndScores = []
        resultcount = len(results)
        neededToEliminate = len(results) - maxcount
        for mypath, val in dirobjs.items():
            depth = mypath.count(delimiter) + 1
            score = scorePath(depth, val, neededToEliminate)
            pathsAndScores.append((mypath, score))
            #print "score: %s\tpath: %s " % (score, mypath)

        # SORT PATHS BY SCORE
        pathsAndScores.sort(floatSort)

        dirs = []
        # FOR EACH PATH, FROM BEST-TO-CUT TO WORST, CUT UNTIL FEW ENOUGH RESULTS
        for i, pathinfo in enumerate(pathsAndScores):
            if (len(notCovered) + len(dirs)) <= maxcount:
                break
            mypath = pathinfo[0]
            myscore = pathinfo[1]
            if isCovered(mypath, dirs):
                continue
            files = dirobjs[mypath]['files']
            #print len(files), myscore
            # REMOVE FILES COVERED BY THIS PATH FROM THE SET OF PATHS
            # WE HAVE YET TO COVER
            removedCount = listDiff(notCovered, files)
            if removedCount > 0:
                # ADD DIRECTORY TO LIST
                dirs.append(mypath)
        #print len(notCovered), len(dirs)

        filesAndDirs = list(notCovered)
        filesAndDirs.extend(dirs)
        #print "MAXCOUNT %s FILESANDDIRS %s" % (maxcount, len(filesAndDirs))
        if len(filesAndDirs) > 0:
            results = []
            for i, mypath in enumerate(filesAndDirs):
                myinfo = dirobjs[mypath]
                count = myinfo['count']
                total = myinfo['total']
                if myinfo['isDir']:
                    mypath += "*"
                results.append({pathfield: mypath, sizefield: total,
                                countfield: count})
        return results
    except Exception, e:
        import traceback
        stack = traceback.format_exc()
        si.generateErrorResults("Error '%s'. %s" % (e, stack))
def usage(): si.generateErrorResults("not implimented") exit(-1)
    argList.append(notes)
    argList.append("-p")
    argList.append(shape)
    argList.append("-R")
    argList.append(readRole)
    argList.append("-S")
    argList.append(search)
    argList.append("-s")
    argList.append(save)
    argList.append("-t")
    argList.append(term_list)
    argList.append("-u")
    argList.append(uom)
    argList.append("-W")
    argList.append(writeRole)

    settings = saUtils.getSettings(sys.stdin)
    argList.append("-E")
    argList.append(settings['namespace'])
    argList.append("-I")
    argList.append(settings['infoPath'])

    saUtils.runProcess(sys.argv[0], "xsCreateADContext", argList, True)

    if containerName == '':
        containerName = contextName

    (worked, response, content) = saUtils.force_lookup_replication(
        settings['namespace'], containerName, settings['sessionKey'], None)
except Exception, e:
    si.generateErrorResults(e)
outputKeyword = "output" elif outputKeyword == "output": outfile = arg if outfile == '': raise Exception( "xsAggregateSpearmanCorrelation-F-001: Usage: xsAggregateSpearmanCorrelation [OUTPUT filename]" ) try: binary = os.path.dirname(sys.argv[0]) + "/" + platform.system( ) + "/" + platform.architecture( )[0] + "/xsAggregateSpearmanCorrelation" if (platform.system() == 'Windows'): binary = binary + ".exe" if not os.path.isfile(binary): raise Exception( "xsAggregateSpearmanCorrelation-F-000: Can't find binary file " + binary) if outfile == '': subprocess.call([binary]) else: subprocess.call([binary, '-f', outfile]) if platform.system() == 'Windows': sys.stdout.flush() time.sleep(1.0) except Exception, e: si.generateErrorResults(e)
def tostr(node):
    if isinstance(node, lxml.etree._Element):
        if len(node.getchildren()) == 0:
            return node.text
        return etree.tostring(node)
    return str(node)


if __name__ == '__main__':
    try:
        keywords, options = si.getKeywordsAndOptions()
        defaultval = options.get('default', None)
        field = options.get('field', '_raw')
        outfield = options.get('outfield', 'xpath')
        if len(keywords) != 1:
            si.generateErrorResults('Requires exactly one path argument.')
            exit(0)
        path = keywords[0]

        results, dummyresults, settings = si.getOrganizedResults()
        # for each result
        for result in results:
            # get field value
            myxml = result.get(field, None)
            added = False
            if myxml != None:
                # make event value valid xml
                myxml = "<data>%s</data>" % myxml
                try:
                    et = etree.parse(StringIO.StringIO(myxml))
                    nodes = et.xpath(path)
                    values = [tostr(node) for node in nodes]
def usage(): si.generateErrorResults(" 'timeunit' argument required, such as s (seconds), h (hours), d (days), w (weeks), or y (years). Optionally prefix with a number: 600s (10 minutes), 2w (2 weeks). Optionally add another argument to specify the time-range label: series=[short,exact,relative]") exit(-1)
logger.error("gpath = %s" % gpath) # find all files matching complete_path = os.path.expanduser( os.path.expandvars(gpath)) glob_matches = glob.glob(complete_path) logger.debug("complete path: %s" % complete_path) logger.debug("glob matches: %s" % glob_matches) if len(glob_matches)==0: logger.error("No file matching %s" % complete_path) raise Exception("No files matching %s." % complete_path) for pfile in glob_matches: logger.error("parsing file: %s" % pfile) results += parse_raw_pstack(pfile, thread_id, reverse, separator, fileorderindex) #return results return results # noinspection PyUnreachableCode if __name__ == '__main__': try: si.outputResults(raw_pstack(), messages, fields) except Exception, e: import traceback stack = traceback.format_exc() si.generateErrorResults("Following error occurred while parsing pstack: '%s'." % (e)) logger.error("%s. %s" % (e, stack))
import splunk.Intersplunk as si
from common import get_sos_server, run_btool
import subprocess
import time

_time = time.time()
sos_server = get_sos_server()

####################################
# main function
####################################
if __name__ == '__main__':
    try:
        keywords, options = si.getKeywordsAndOptions()
        if len(keywords) == 0:
            si.generateErrorResults('Requires a conf file name.')
            exit(0)
        conffile = ' '.join(keywords)

        # Handle extra args: e.g. 'app=learned' becomes --app=learned
        btool_options = []
        for (opt, arg) in options.items():
            btool_options.append("--%s=%s" % (opt, arg))
        btool_args = btool_options + [conffile, "list-debug"]

        results = []
        for (app, stanza, lines) in run_btool(*btool_args):
            results.append({"_raw": "\n".join(lines),
                            "_time": _time,
                            "stanza": stanza,
                            "app": app,
                            "sos_server": sos_server,
            ranges[name] = (float(start), float(end))
        except:
            si.generateErrorResults(
                "Invalid range: '%s'. '<start_num>-<end_num>' expected."
                % startend)
            exit(0)
    return ranges


if __name__ == '__main__':
    try:
        keywords, options = si.getKeywordsAndOptions()
        # field=foo green[0::20] yellow[21::80] red[81::100]
        # field=foo green=0-20 yellow=21-80 red=81-100 default=black
        field = options.get('field', None)
        if field == None:
            si.generateErrorResults(
                "'field' argument required, such as field=y")
            exit(0)
        ranges = getRanges(options)

        (isgetinfo, sys.argv) = si.isGetInfo(sys.argv)
        if isgetinfo:
            # outputInfo automatically calls sys.exit()
            si.outputInfo(True, False, True, False, None, True, False,
                          [field])

        defaultval = options.get('default', 'None')
        results, dummyresults, settings = si.getOrganizedResults()
        # for each result
        for result in results:
            # get field value
            myvalue = result.get(field, None)
            myranges = []
# mvfind(search_indexes, default_indexes)
if __name__ == '__main__':
    try:
        keywords, options = si.getKeywordsAndOptions()
        examples = options.get('pattern_field', None)
        match_list = options.get('list', None)
        output_field = options.get('output_field', None)
        mode = options.get('mode', None)
        index_field = options.get('index_field', None)
        st_field = options.get('st_field', None)
        defaults = options.get('defaults', None)
        allowed = options.get('allowed', None)
        dm = options.get('dm_field')
        if not examples:
            si.generateErrorResults('Requires pattern_field field.')
            exit(0)
        if not match_list and not st_field:
            si.generateErrorResults('Requires list field.')
            exit(0)
        if not output_field:
            output_field = "new_field"

        results, dummyresults, settings = si.getOrganizedResults()
        for result in results:
            if result.get(dm, None):
                continue
            if not mode:
                result[output_field] = mvfind(result[examples],
                                              result[match_list])
            elif mode == "x":
# Setup logger object
logger = octopus_common.setup_logging()
logger.info(time.time())

try:
    octopus_url = protocol + "://" + hostname + "/api/users/all"

    # Setup response object and execute GET request
    response = requests.get(
        url=octopus_url,
        headers={"X-Octopus-ApiKey": apikey},
    )
    response.raise_for_status()

    # Handle response
    json_response = json.loads(response.content)

    # Iterate users and print results to Splunk
    for user in json_response:
        print json.dumps(user)
    sys.exit(0)

# Catch exceptions if needed
except Exception as e:
    logger.exception("Exception: " + str(e))
    isp.generateErrorResults(str(e))
import hashlib

import splunk.Intersplunk as si

if __name__ == '__main__':
    try:
        keywords, options = si.getKeywordsAndOptions()
        if len(keywords) == 0:
            si.generateErrorResults('Requires fields list.')
            exit(0)
        search = ' '.join(keywords)

        results, dummyresults, settings = si.getOrganizedResults()
        for result in results:
            eventSignature = '-=XXX=-'.join(
                [result.get(field, '') for field in keywords])
            sigHash = hashlib.md5(eventSignature).hexdigest()
            result['_icon'] = sigHash
        si.outputResults(results)
    except Exception, e:
        import traceback
        stack = traceback.format_exc()
        si.generateErrorResults("Error '%s'. %s" % (e, stack))
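# A quick worked example of the signature above (Python 2 byte strings, as in
# the snippet; the field values are illustrative): events with identical
# values in the listed fields always get the same _icon hash.
import hashlib

keywords = ['host', 'status']
result = {'host': 'web-01', 'status': '404'}
eventSignature = '-=XXX=-'.join([result.get(field, '') for field in keywords])
print(hashlib.md5(eventSignature).hexdigest())  # same fields -> same hash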
    '白沙': {'loc': [109.3703, 19.211], 'fullName': '白沙黎族自治县'},
    '琼海': {'loc': [110.4208, 19.224], 'fullName': '琼海市'},
    '昌江': {'loc': [109.0407, 19.2137], 'fullName': '昌江黎族自治县'},
    '临高': {'loc': [109.6957, 19.8063], 'fullName': '临高县'},
    '陵水': {'loc': [109.9924, 18.5415], 'fullName': '陵水黎族自治县'},
    '屯昌': {'loc': [110.0377, 19.362], 'fullName': '屯昌县'},
    '定安': {'loc': [110.3384, 19.4698], 'fullName': '定安县'},
    '保亭': {'loc': [109.6284, 18.6108], 'fullName': '保亭黎族苗族自治县'},
    '五指': {'loc': [109.5282, 18.8299], 'fullName': '五指山市'}
}

if __name__ == '__main__':
    try:
        keywords, options = si.getKeywordsAndOptions()
        if len(keywords) == 0:
            si.generateErrorResults('Requires city field.')
            exit(0)
        city = ' '.join(keywords)

        results, dummyresults, settings = si.getOrganizedResults()
        for result in results:
            try:
                myvalue = result.get(city, None)
                myvalue = myvalue[:6]
                if dataset[myvalue] != None:
                    result['city_lng'] = dataset[myvalue]['loc'][0]
                    result['city_lat'] = dataset[myvalue]['loc'][1]
                    result['fullcityname'] = dataset[myvalue]['fullName']
            except:
                pass
        si.outputResults(results)
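# Why myvalue[:6] above works: under Python 2 these are UTF-8 byte strings,
# so 6 bytes is exactly two CJK characters - enough to match the
# two-character dataset keys (the sample value below is illustrative).
myvalue = '白沙黎族自治县'  # e.g. a full county name from an event field
print(myvalue[:6])          # -> '白沙', which is a key of dataset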
        # e.g., '/data/inputs/monitor'
        entity = keywords[0]
        logger.info("Entity: %s Args: %s" % (entity, args))

        results = []  # we don't care about incoming results
        try:
            entitys = en.getEntities(entity, sessionKey=sessionKey,
                                     owner=owner, namespace=namespace,
                                     count=-1)
            for name, entity in entitys.items():
                try:
                    myapp = entity["eai:acl"]["app"]
                    if namespace != None and myapp != namespace:
                        continue
                except:
                    continue  # if no eai:acl/app, filter out
                result = entityToResult(name, entity)
                results.append(result)
        except splunk.ResourceNotFound, e2:
            pass
        si.outputResults(results, messages)
    except Exception, e:
        import traceback
        stack = traceback.format_exc()
        logger.error(str(e) + ". Traceback: " + str(stack))
        si.generateErrorResults(str(e))


if __name__ == "__main__":
    execute()
def main():
    output = []

    def Log(s, verb_level, code=1, extra=None, force_print=False):
        if verb_level <= log.getverbosity():
            output.extend(s.split("\n"))

    # def PrintCollectionStatus(col_stats, force_print=False):
    #     # raise ValueError(type(col_stats.matched_chain_pair[1]))
    #     output.append({
    #         "num_backup_sets":
    #     })
    # log.PrintCollectionStatus = PrintCollectionStatus

    results = None
    try:
        settings = dict()
        Intersplunk.readResults(None, settings, True)

        dup_time.setcurtime()
        archive_dir = os.path.join(app_dir, "local", "data", "archive")
        try:
            os.makedirs(archive_dir)
        except:
            pass

        if sys.argv[1] == "splunk-last-backups":
            ap = argparse.ArgumentParser()
            ap.add_argument("--time", type=int)
            ap.add_argument("backend")
            args = ap.parse_args(sys.argv[2:])

            dup_globals.gpg_profile = gpg.GPGProfile()
            dup_globals.gpg_profile.passphrase = os.environ["PASSPHRASE"]

            backend.import_backends()
            dup_globals.backend = backend.get_backend(args.backend)

            if dup_globals.backup_name is None:
                dup_globals.backup_name = \
                    commandline.generate_default_backup_name(args.backend)

            commandline.set_archive_dir(archive_dir)

            results = []
            time = args.time
            col_stats = dup_collections.CollectionsStatus(
                dup_globals.backend, dup_globals.archive_dir_path,
                "list-current").set_values()

            try:
                sig_chain = col_stats.get_backup_chain_at_time(time)
            except dup_collections.CollectionsError:
                results.append({
                    "last_full_backup_time": 0,
                    "last_incr_backup_time": 0,
                })
            else:
                if sig_chain.incset_list:
                    last_incr_backup_time = max(
                        [incset.end_time for incset in sig_chain.incset_list])
                else:
                    last_incr_backup_time = 0
                results.append({
                    "last_full_backup_time":
                        col_stats.get_last_full_backup_time(),
                    "last_incr_backup_time": last_incr_backup_time
                })
        elif sys.argv[1] == "splunk-file-list":
            ap = argparse.ArgumentParser()
            ap.add_argument("--time")
            ap.add_argument("backend")
            args = ap.parse_args(sys.argv[2:])
            args.time = int(args.time.split(".")[0])

            dup_time.setcurtime(args.time)
            dup_globals.restore_time = args.time

            dup_globals.gpg_profile = gpg.GPGProfile()
            dup_globals.gpg_profile.passphrase = os.environ["PASSPHRASE"]

            backend.import_backends()
            dup_globals.backend = backend.get_backend(args.backend)

            if dup_globals.backup_name is None:
                dup_globals.backup_name = \
                    commandline.generate_default_backup_name(args.backend)

            commandline.set_archive_dir(archive_dir)

            results = []
            col_stats = dup_collections.CollectionsStatus(
                dup_globals.backend, dup_globals.archive_dir_path,
                "list-current").set_values()

            time = args.time
            sig_chain = col_stats.get_signature_chain_at_time(time)
            path_iter = diffdir.get_combined_path_iter(
                sig_chain.get_fileobjs(time))
            for path in path_iter:
                if path.difftype != u"deleted" and path.index:
                    mode = bin(path.mode)[2:]
                    perms = ""
                    for p, val in enumerate(mode):
                        if p in (0, 3, 6):
                            c = "r"
                        elif p in (1, 4, 7):
                            c = "w"
                        elif p in (2, 5, 8):
                            c = "x"
                        perms += c if int(val) else "-"
                    if path.type == "dir":
                        perms = "d" + perms
                    elif path.type == "sym":
                        perms = "l" + perms
                    else:
                        perms = "-" + perms

                    results.append({
                        "perms": perms,
                        "owner": path.stat.st_uid,
                        "group": path.stat.st_gid,
                        "size": path.stat.st_size,
                        "modtime": path.stat.st_mtime,
                        "filename": os.path.join(*path.index),
                    })
        else:
            args = ["--archive-dir", archive_dir] + sys.argv[1:]
            action = commandline.ProcessCommandLine(args)

            log.Log = Log
            try:
                dup_main.do_backup(action)
            except dup_collections.CollectionsError:
                results = []
    except SystemExit:
        pass
    except Exception as e:
        import traceback
        # sys.stderr.write(traceback.format_exc())
        Intersplunk.generateErrorResults("Traceback: %s" %
                                         traceback.format_exc())
        return

    if output and not results:
        import time
        results = [{"_raw": "\n".join(output), "_time": time.time()}]

    if results:
        try:
            Intersplunk.outputResults(results)
        except Exception:
            import traceback
            sys.stderr.write(traceback.format_exc())
            results = Intersplunk.generateErrorResults(
                "Traceback: %s" % traceback.format_exc())
            Intersplunk.outputResults(results)
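# A worked example of the permission decoding in main() above: bin() drops
# leading zero bits, so a full 9-bit mode decodes cleanly (values are
# illustrative; modes below 0o400 would lose leading bits and decode shifted).
mode = bin(0o755)[2:]  # -> '111101101'
perms = ""
for p, val in enumerate(mode):
    if p in (0, 3, 6):
        c = "r"
    elif p in (1, 4, 7):
        c = "w"
    elif p in (2, 5, 8):
        c = "x"
    perms += c if int(val) else "-"
print(perms)           # -> 'rwxr-xr-x'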
####################################
# main function
####################################
if __name__ == '__main__':
    try:
        pseudohandle = subprocess.Popen(["btool", "check"], shell=False,
                                        stdout=subprocess.PIPE)
        stdout = pseudohandle.communicate()

        results = []
        lines = stdout[0].split("\n")
        for line in lines:
            typoMatch = re.match(
                r"^Possible typo in stanza (\[[^\]]*\]) in ([^,]*), "
                r"line (\d+): ([\S]*)\s*=\s*([^\v]*)", line)
            if typoMatch:
                results.append({"_raw": line,
                                "_time": _time,
                                "sos_server": sos_server,
                                "stanza": typoMatch.group(1),
                                "fpath": typoMatch.group(2),
                                "line": typoMatch.group(3),
                                "key": typoMatch.group(4),
                                "value": typoMatch.group(5)})
        si.outputResults(results)
    except Exception, e:
        import traceback
        stack = traceback.format_exc()
        si.generateErrorResults("Error '%s'. %s" % (e, stack))
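# A sample line of the shape the regex above expects (the text is
# illustrative; actual `btool check` output may vary):
import re

line = ("Possible typo in stanza [default] in "
        "/opt/splunk/etc/system/local/props.conf, line 5: "
        "SHOUD_LINEMERGE = false")
m = re.match(r"^Possible typo in stanza (\[[^\]]*\]) in ([^,]*), "
             r"line (\d+): ([\S]*)\s*=\s*([^\v]*)", line)
print(m.groups())
# -> ('[default]', '/opt/splunk/etc/system/local/props.conf', '5',
#     'SHOUD_LINEMERGE', 'false')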
def tostr(node):
    if isinstance(node, lxml.etree._Element):
        if len(node.getchildren()) == 0:
            return node.text or "Null"
        return etree.tostring(node)
    return str(node)


if __name__ == '__main__':
    try:
        keywords, options = si.getKeywordsAndOptions()
        defaultval = options.get('default', None)
        field = options.get('field', '_raw')
        outfield = options.get('outfield', 'xpath')
        if len(keywords) != 1:
            si.generateErrorResults('Requires exactly one path argument.')
            exit(0)
        path = keywords[0]

        # Support for searching with absolute path
        if len(path) > 1 and path[0] == '/' and path[1] != '/':
            path = '/data' + path

        results, dummyresults, settings = si.getOrganizedResults()
        # for each result
        for result in results:
            # get field value
            myxml = result.get(field, None)
            added = False
            if myxml != None:
                # make event value valid xml
                myxml = "<data>%s</data>" % myxml
                try:
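# A minimal standalone sketch of the extraction logic above (assumes only
# lxml is installed; the XML sample is illustrative). The '/data' prefix
# mirrors the <data> wrapper the command adds around each event:
from lxml import etree

myxml = "<data><user><name>alice</name></user></data>"
et = etree.fromstring(myxml)
nodes = et.xpath('/data/user/name')    # the rewritten absolute path form
print([node.text for node in nodes])   # -> ['alice']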
def usage(): si.generateErrorResults("Usage: entity <endpoint>") exit(0)
    # 1. sync accounts
    if SYNC_ACCOUNTS_TASK in tasks:
        result = {'Task': 'Sync Accounts'}
        try:
            result['Result'] = sync_task.sync_accounts()
        except Exception as err:
            result['Result'] = str(err)
        results.append(result)

    # 2. sync inputs for macros update
    if SYNC_MACROS_TASK in tasks:
        result = {'Task': 'Sync Macros'}
        try:
            result['Result'] = sync_task.sync_macros()
        except Exception as err:
            result['Result'] = str(err)
        results.append(result)
except:
    import traceback
    stack = traceback.format_exc()
    results = intersplunk.generateErrorResults("Error : Traceback: " +
                                               str(stack))
    intersplunk.outputResults(results)
            fixVersions = []
            for f in issue['fixVersions']:
                fixVersions.append(f['name'])
            row['fixVersions'] = fixVersions

            # Custom fields
            for f in issue['customFieldValues']:
                if f['customfieldId'] == "customfield_10020":
                    row['SFDCcase'] = f['values']
                if f['customfieldId'] == "customfield_10091":
                    row['TargetRelease'] = f['values']

            row['host'] = hostname
            row['source'] = 'jira_soap'
            row['sourcetype'] = "jira_issues"

            # override _time if time argument is set
            if time_option == "now":
                row['_time'] = int(time.time())
            else:
                row['_time'] = int(time.mktime(
                    time.strptime(row[time_option], '%Y-%m-%d %H:%M:%S')))

            results.append(row)

        isp.outputResults(results)
    except Exception, e:
        logger.exception(str(e))
        isp.generateErrorResults(str(e))
GOOGLE_REST_URL = "http://ajax.googleapis.com/ajax/services/search/web?v=1.0&%s"


def stripCommonHTML(text):
    tags = ['<b>', '</b>', '<i>', '</i>', '<a>', '</a>', '<a ',
            '<br>', '<br />', '"']
    for tag in tags:
        text = text.replace(tag, "")
    return text


if __name__ == '__main__':
    try:
        keywords, options = si.getKeywordsAndOptions()
        maxresults = int(options.get('maxresults', '10'))
        if len(keywords) == 0:
            si.generateErrorResults('Requires search terms.')
            exit(0)
        search = ' '.join(keywords)
        # results,dummyresults,settings = si.getOrganizedResults()
        results = []
        now = str(int(time.mktime(time.gmtime())))
        start = 0
        # google rest api returns very few results,
        # so get many pages of a small number of results
        for loop in range(0, 20):
            try:
                # Define the query to pass to Google Search API
                query = urllib.urlencode({'q': search, 'start': start})
                # Fetch the results and convert to JSON
                search_results = urllib2.urlopen(GOOGLE_REST_URL % query)
                data = json.loads(search_results.read())
    if not url:
        raise Exception("No slack url specified!")

    # read search results
    results = sis.readResults(None, None, True)

    # prepare data to be sent to slack
    data = {
        'text': get_pretty_table(results),
        'username': username,
        'channel': channel,
        'icon_url': icon,
        'mrkdwn': True,
    }

    # send data to slack
    r = requests.post(url, data=json.dumps(data))
    if r.status_code == 200:
        sis.outputResults(results)
    else:
        err_msg = ("Error sending results to slack, reason: {r}, {t}".format(
            r=r.reason, t=r.text))
        sis.generateErrorResults(err_msg)


try:
    main()
except Exception, e:
    import traceback
    stack = traceback.format_exc()
    sis.generateErrorResults("Error '{e}'. {s}".format(e=e, s=stack))
        utc = result.get('_time', None)
        if utc == None:
            reltime = "unknown"
        else:
            diff = int(now - float(utc))
            if diff < -60:
                reltime = "future"
            elif diff < 0:
                # handle weird case of client clock off slightly
                reltime = "now"
            elif diff == 0:
                reltime = "now"
            elif diff < MINUTE:
                reltime = unitval("second", diff)
            elif diff < HOUR:
                reltime = unitval("minute", diff / MINUTE)
            elif diff < DAY:
                reltime = unitval("hour", diff / HOUR)
            elif diff < MONTH:
                reltime = unitval("day", diff / DAY)
            elif diff < YEAR:
                reltime = unitval("month", diff / MONTH)
            else:
                reltime = unitval("year", diff / YEAR)
        result['reltime'] = reltime
    si.outputResults(results)
except Exception, e:
    import traceback
    stack = traceback.format_exc()
    si.generateErrorResults("Error '%s'" % e)
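# The snippet above assumes a unitval() helper that renders "N units ago"
# strings. A minimal sketch of what it might look like (hypothetical; the
# real implementation is not shown in this source):
def unitval(unit, amount):
    amount = int(amount)
    plural = "" if amount == 1 else "s"
    return "%d %s%s ago" % (amount, unit, plural)

print(unitval("minute", 90 / 60))  # -> '1 minute ago'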
reltime = "unknown" elif utc == None: reltime = "unknown" else: diff = int(now - float(utc)) if diff < -60: reltime = "future" elif diff < 0: # handle weird case of client clock off slightly reltime = "now" elif diff == 0: reltime = "now" elif diff < MINUTE: reltime = unitval("second", diff) elif diff < HOUR: reltime = unitval("minute", diff / MINUTE) elif diff < DAY: reltime = unitval("hour", diff / HOUR) elif diff < MONTH: reltime = unitval("day", diff / DAY) elif diff < YEAR: reltime = unitval("month", diff / MONTH) else: reltime = unitval("year", diff / YEAR) result['reltime'] = reltime si.outputResults(results) except Exception as e: import traceback stack = traceback.format_exc() si.generateErrorResults("Error '%s'" % e)
        owner = settings.get("owner", None)
        namespace = settings.get("namespace", None)

        searchhead = ''
        try:
            searches = sutils.getCommands(settings.get("search", ''), None)
            firstcmd = searches[0][0][0]
            firstarg = searches[0][0][1].strip()
            if firstcmd == 'search' and firstarg != '*':
                searchhead = firstarg
        except Exception, e:
            pass

        results = discover(results, searchhead, maxtypes, ignore_covered,
                           useraw)
        if len(results) == 0:
            si.addWarnMessage(messages,
                              "Unable to isolate useful groups of events.")
    except:
        import traceback
        stack = traceback.format_exc()
        results = si.generateErrorResults("Error : Traceback: " + str(stack))

    si.outputResults(results, messages)


if __name__ == '__main__':
    #profileMain()
    main()