Example #1
    def main(self):
           logger.error("[---] Splunk Debug printing isp Inspect Results: {}".format(inspect.stack()[1])) 
           results, dummyresults, self.settings = isp.getOrganizedResults()
           self.keywords, self.argvals = isp.getKeywordsAndOptions()
           logger.error("[---] Splunk Debug splunklib results: {}".format(self._metadata))
           # in Splunk pre 5.0 we don't get the info, so we just read it from its standard location
           infoPath = self.settings.get('infoPath', '')
           logger.error("[---] Splunk Debug printing isp stuff inside hsc: {}".format(isp.getOrganizedResults()))
           logger.error("[---] Splunk Debug printing isp keywords and argvals inside hsc: {}".format(isp.getKeywordsAndOptions()))
           if len(infoPath) == 0:
              infoPath = os.path.join(getDispatchDir(self.settings.get('sid'), self.settings.get('sharedStorage', None)), 'info.csv')
           self.info.readFrom(infoPath)


           self.raiseAll = splunk.util.normalizeBoolean(unquote(self.argvals.get('raiseall', 'f')))
           self.sessionKey = self.settings.get('sessionKey', None)
           self.owner      = self.settings.get('owner',      None)
           self.namespace  = self.settings.get('namespace',  None)
           self.krb5_principal = unquote(self.argvals.get('kerberos_principal', '')).strip()


           if len(self.krb5_principal) == 0:
              self.krb5_principal = None
           HadoopEnvManager.init(APP_NAME, 'nobody', self.sessionKey, self.krb5_principal)

           self._main_impl()
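
All of the examples on this page are built around the same splunk.Intersplunk pattern: read the piped-in events and the search metadata with getOrganizedResults(), read any command arguments with getKeywordsAndOptions(), transform the events, and hand them back with outputResults() (or generateErrorResults() on failure). The following is a minimal, self-contained sketch of that skeleton; the 'prefix' option and the field copying are purely illustrative and are not taken from any of the examples here.

import traceback

import splunk.Intersplunk as si


def run():
    try:
        # command-line arguments passed to the custom search command
        keywords, options = si.getKeywordsAndOptions()
        # incoming events, dummy results, and search metadata (sessionKey, owner, namespace, ...)
        results, dummyresults, settings = si.getOrganizedResults()

        prefix = options.get('prefix', 'x_')  # hypothetical option, for illustration only
        for result in results:
            # copy every field under a prefixed name
            for field in list(result.keys()):
                result[prefix + field] = result[field]

        si.outputResults(results)
    except Exception:
        si.generateErrorResults("Error: " + traceback.format_exc())


if __name__ == '__main__':
    run()
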
Example #2
def main():
    client = actconfig.setup()

    # Parse arguments
    opts, kwargs = Intersplunk.getKeywordsAndOptions()

    if not opts:
        Intersplunk.generateErrorResult(
            "Usage: | actadd <field1> ... <fieldN> [fact_type=<fact type>] [fact_value=<fact value]"
        )
        return

    events, _, _ = Intersplunk.getOrganizedResults()

    # Annotate events
    for event in events:
        object_value = []
        for field in opts:
            if event.get(field):
                object_value.append(event[field])

        if not object_value:
            continue

        event.update(fact_search(client, object_value, **kwargs))

    Intersplunk.outputResults(events)
Example #3
    def main(self):
        results, dummyresults, self.settings = isp.getOrganizedResults()
        self.keywords, self.argvals = isp.getKeywordsAndOptions()
        logger.info('keywords:' + str(self.keywords))

        # in Splunk pre 5.0 we don't get the info, so we just read it from its standard location
        infoPath = self.settings.get('infoPath', '')
        if len(infoPath) == 0:
            infoPath = os.path.join(
                getDispatchDir(self.settings.get('sid'),
                               self.settings.get('sharedStorage', None)),
                'info.csv')
        self.info.readFrom(infoPath)

        self.raiseAll = splunk.util.normalizeBoolean(
            unquote(self.argvals.get('raiseall', 'f')))
        self.sessionKey = self.settings.get('sessionKey', None)
        self.owner = self.settings.get('owner', None)
        self.namespace = self.settings.get('namespace', None)
        self.krb5_principal = unquote(
            self.argvals.get('kerberos_principal', '')).strip()

        if len(self.krb5_principal) == 0:
            self.krb5_principal = None
        HadoopEnvManager.init(APP_NAME, 'nobody', self.sessionKey,
                              self.krb5_principal)

        self._main_impl()
Example #4
def main():
    try:
        output_fields = ['_time']
        output_results = []
        search_results, dummyresults, settings = intersplunk.getOrganizedResults(
        )
        if search_results is None or len(search_results) == 0:
            intersplunk.outputResults(output_results, fields=output_fields)
            return

        fields = search_results[0].keys()
        is_field_valid, is_detection_needed = check_fields(fields)
        if not is_field_valid:
            intersplunk.parseError(
                'This visualization requires timestamped, evenly spaced numeric time-series data. Try using the timechart command in your query.'
            )

        if not is_detection_needed:
            intersplunk.outputResults(search_results,
                                      fields=search_results[0].keys())
            return

        output_results, output_fields = wrap_anomaly_detection(search_results)
        intersplunk.outputResults(output_results, fields=output_fields)
    except:
        stack = traceback.format_exc()
        results = intersplunk.generateErrorResults("Error : Traceback: " +
                                                   str(stack))
        intersplunk.outputResults(results)
Example #5
def execute():
    try:
        keywords, argvals = isp.getKeywordsAndOptions()
        results, dummyresults, settings = isp.getOrganizedResults()
        sessionKey = settings.get('sessionKey')

        if sessionKey == None:
            return vixutils.generateErrorResults(
                'sessionKey not passed to the search command, something\'s very wrong!'
            )

        #check that the command is being executed by the scheduler
        sid = settings.get('sid')
        if not sid.startswith('scheduler_') and not argvals.get(
                'forcerun', '') == '1':
            return vixutils.generateErrorResults(
                'rollercontroller is supposed to be run by the scheduler, add forcerun=1 to force execution'
            )

        # check if error messaging is disabled
        global ERRMSGS_ENABLED
        ERRMSGS_ENABLED = 'disablemsgs' not in keywords

        providers = erp_launcher.listProviders(sessionKey)
        rollVixes = erp_launcher.listVixes(
            sessionKey, 'disabled=0 AND vix.output.buckets.from.indexes=*')
        rollProviders = filterRollProviders(rollVixes, providers)
        searchString = genSearchString(rollVixes, rollProviders)

        kwargs = {}
        for k in ['owner', 'namespace', 'sessionKey', 'hostPath']:
            if k in settings:
                kwargs[k] = settings[k]

        if not os.path.exists(vixutils.getAppBinJars()):
            # first time we're copying jars, force bundle replication
            kwargs['force_bundle_replication'] = 1

        prepareSearchExecution()

        numRetries = argvals.get("retries", 1)

        for i in range(0, int(numRetries)):
            logger.info("Dispatching the search: %s" % searchString)
            search = splunk.search.dispatch(searchString, **kwargs)
            try:
                streamSearch(search, sessionKey)
            finally:
                cancelSearch(search)

    except Exception as e:
        import traceback
        splunkio.write([{
            "stack": traceback.format_exc(),
            "exception": str(e)
        }])
    finally:
        sys.stdout.flush()
Example #6
def getSplunkConf():
   results, dummyresults, settings = isp.getOrganizedResults()
   namespace = settings.get("namespace", None)
   owner = settings.get("owner", None)
   sessionKey = settings.get("sessionKey", None)

   conf = sb.getConf('jira', namespace=namespace, owner=owner, sessionKey=sessionKey)
   stanza = conf.get('jira')

   return stanza
Example #7
def getSplunkConf():
   results, dummyresults, settings = isp.getOrganizedResults()
   namespace = settings.get("namespace", None)
   owner = settings.get("owner", None)
   sessionKey = settings.get("sessionKey", None)

   conf = sb.getConf('jira', namespace=namespace, owner=owner, sessionKey=sessionKey)
   stanza = conf.get('jira')

   return stanza
Example #8
def getSessionKey():

    results, dummyresults, settings = si.getOrganizedResults()
    sessionKey = settings.get("sessionKey", None)
    if not sessionKey:
        sys.stderr.write("Did not receive a session key from splunkd. " +
                         "Please enable passAuth in inputs.conf for this " +
                         "script\n")
        exit(2)
    return sessionKey
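
In practice the session key recovered this way is handed straight to the other Splunk helpers seen in these examples. Below is a minimal sketch that combines it with the getConf() pattern from the getSplunkConf() examples above; the 'jira' conf name and the si/sb import aliases are simply carried over from those examples, not requirements.

import sys

import splunk.Intersplunk as si
import splunk.bundle as sb


def get_jira_stanza():
    # same pattern as getSplunkConf() and getSessionKey() above
    results, dummyresults, settings = si.getOrganizedResults()
    sessionKey = settings.get("sessionKey", None)
    if not sessionKey:
        sys.stderr.write("Did not receive a session key from splunkd. "
                         "Please enable passAuth in inputs.conf for this script\n")
        sys.exit(2)
    conf = sb.getConf('jira',
                      namespace=settings.get("namespace", None),
                      owner=settings.get("owner", None),
                      sessionKey=sessionKey)
    return conf.get('jira')
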
Example #9
def main():
    try:
        search_results, dummy_results, settings = intersplunk.getOrganizedResults()
        if len(search_results) > 0:
            output_results = cal_utilization(search_results)
            intersplunk.outputResults(output_results, fields=output_results[0].keys())
    except:
        stack = traceback.format_exc()
        results = intersplunk.generateErrorResults("Error : Traceback: " + str(stack))
        intersplunk.outputResults(results)
Example #10
File: entity.py Project: Raboo/chef-splunk
def execute():
    results = []
    try:
        results, dummyresults, settings = si.getOrganizedResults()

        # default values
        args = {"namespace": "search"}
        # get commandline args
        keywords, options = si.getKeywordsAndOptions()
        # override default args with settings from search kernel
        args.update(settings)
        # override default args with commandline args
        args.update(options)

        sessionKey = args.get("sessionKey", None)
        owner = args.get("owner", "admin")
        namespace = args.get("namespace", None)

        if namespace.lower() == "none":
            namespace = None

        messages = {}

        if sessionKey == None:
            # this shouldn't happen, but it's useful for testing.
            try:
                sessionKey = sa.getSessionKey("admin", "changeme")
                si.addWarnMessage(
                    messages, "No session given to 'tune' command. Using default admin account and password."
                )
            except splunk.AuthenticationFailed, e:
                si.addErrorMessage(messages, "No session given to 'tune' command.")
                return

        if len(keywords) != 1:
            usage()

        # e.g., '/data/inputs/monitor'
        entity = keywords[0]
        logger.info("Entity: %s Args: %s" % (entity, args))

        results = []  # we don't care about incoming results
        try:
            entitys = en.getEntities(entity, sessionKey=sessionKey, owner=owner, namespace=namespace, count=-1)
            for name, entity in entitys.items():
                try:
                    myapp = entity["eai:acl"]["app"]
                    if namespace != None and myapp != namespace:
                        continue
                except:
                    continue  # if no eai:acl/app, filter out
                result = entityToResult(name, entity)
                results.append(result)
        except splunk.ResourceNotFound, e2:
            pass
Example #11
def main():
  try:    
    messages = {}

    keywords,options = si.getKeywordsAndOptions()
    DEFAULT_MAX_TYPES = 10
    maxtypes = options.get('max', str(DEFAULT_MAX_TYPES))

    error = None
    if not maxtypes.isdigit():
        error = 'max must be an integer between 1-%s.' % MAXRESULTS
    else:
        maxtypes = int(maxtypes)
        if not (0 < maxtypes <= MAXRESULTS):
            error = 'max must be an integer between 1-%s.' % MAXRESULTS
    if error:
      si.generateErrorResults(error)
      return

    ignore_covered = 'notcovered' in keywords
    useraw         = 'useraw' in keywords
      
    results,dummyresults,settings = si.getOrganizedResults()
    #for r in results:
    #  for attr in r:
    #     print attr, r[attr], len(r[attr])
    if len(results) > MAXRESULTS:
      results = results[:MAXRESULTS]
      si.addWarnMessage(messages, "For performance reasons, the maximum number of results used to discover event types was capped at %s. Consider a more restrictive search." % MAXRESULTS)

    argc = len(sys.argv)
    argv = sys.argv

    sessionKey  = settings.get("sessionKey", None)
    owner       = settings.get("owner", None)
    namespace   = settings.get("namespace", None)

    searchhead = ''
    try:
      searches = sutils.getCommands(settings.get("search", ''), None)
      firstcmd = searches[0][0][0]
      firstarg = searches[0][0][1].strip()
      if firstcmd == 'search' and firstarg != '*':
        searchhead = firstarg
    except Exception, e:
      pass
    
    results = discover(results, searchhead, maxtypes, ignore_covered, useraw)

    if len(results) == 0:
      si.addWarnMessage(messages, "Unable to isolate useful groups of events.")
Example #12
def main():
    if len(sys.argv) < 3:
        usage()

    tname = sys.argv[1]
    #log("args")
    #for v in sys.argv:
    #    log(v)

    options = ["max_terms", "use_disjunct", "eventsonly"]
    srchargs = []
    log("ARGS: %s" % sys.argv[2:])
    for arg in sys.argv[2:]:
        for option in options:
            if arg.startswith(option):
                break
        else:
            srchargs.append(arg)
    if len(srchargs) == 0:
        usage()

    tsearch = ' '.join(srchargs)
    log("SEARCH: %s" % tsearch)

    results, dummyresults, settings = si.getOrganizedResults()
    results = []  # we don't care about incoming results

    ########TEST#####################
    if 'sessionKey' not in settings:
        settings['owner'] = 'admin'
        settings['password'] = '******'
        settings['namespace'] = 'search'
        settings['sessionKey'] = splunk.auth.getSessionKey('admin', 'changeme')
    ########TEST####################
    kwargs = {}
    for f in ['owner', 'namespace', 'sessionKey', 'hostPath']:
        if f in settings:
            kwargs[f] = settings[f]

    messages = {}
    try:
        maxTerms = int(settings.get("max_terms", MAX_SEARCH_COMPLEXITY))
        if maxTerms > MAX_SEARCH_COMPLEXITY or maxTerms < 1:
            si.addWarnMessage(
                messages,
                "max_terms must be between 1 and %s.  Using default." %
                MAX_SEARCH_COMPLEXITY)
            maxTerms = MAX_SEARCH_COMPLEXITY
    except Exception, e:
        maxTerms = MAX_SEARCH_COMPLEXITY
Example #13
File: commons.py Project: wushenwu/osweep
def return_results(module):
    try:
        results, dummy_results, settings = InterSplunk.getOrganizedResults()

        if isinstance(results, list) and len(results) > 0:
            new_results = module.process_iocs(results)
        elif len(sys.argv) > 1:
            new_results = module.process_iocs(None)
    except:
        stack = traceback.format_exc()
        new_results = InterSplunk.generateErrorResults("Error: " + str(stack))

    InterSplunk.outputResults(new_results)
    return
Example #14
def main():
    if DEBUG:
        logger("main")

    check_port(PROXY_HOST, PROXY_PORT)
    check_port(SNOW_HOST, SNOW_PORT)
    check_port(HEC_HOST, HEC_PORT)
    check_port(SPLUNK_INDEX, SPLUNK_INDEX_PORT)
    verify_log_path(LOG_PATH)

    if DEBUG:
        logger("MAIN: Start of Run")
        print "MAIN: Requests Version", requests.__version__
        print "MAIN: before si call"

    try:
        myresults, dummyresults, settings = si.getOrganizedResults()
    except Exception as ex:
        print datetime.datetime.now(), "SEARCH_RESULTS: ERROR: Call to get Splunk Results failed.  Reason:", ex
        logger("ERROR: Call to get Splunk Results failed.")
        logger(str(ex))
        if DEBUG:
            print datetime.datetime.now(), "SNOW:  Message: ", ex.message
            logger("Message: " + ex.message)
    else:
        for r in myresults:
            if DEBUG:
                print datetime.datetime.now(), "MAIN: r=", r
                #logger("from MAIN: " + str(r))
            SNOW_Event = {}
            SEND_SNOW = True
            for k, v in r.items():
                SNOW_Event[k] = v
                if k == "nosend":
                    print "nosend detected"
                    SEND_SNOW = False

            ### NOTE: the request to SNOW requires the data to be of type str
            if SEND_SNOW:
                send_to_snow(str(SNOW_Event))
            else:
                print datetime.datetime.now(), "NO Send honored."
                logger("NO send honored")

    if DEBUG:
        logger("MAIN: End of Run")
Example #15
def yamuser():
	try:

#		logger = dcu.getLogger()
#		logger.info("Starting the yamuser command")

		# Get configuration values from jira.conf
		splunk_conf = yammercommon.getSplunkConf()
		
#		logger.root.setLevel(logging.DEBUG)

		local_conf = yammercommon.getLocalConf()

		access_token = local_conf.get('yammercommon', 'access_token')

#		logger.debug("Access Token %s" % access_token)

		yammer = yampy.Yammer(access_token=access_token)

		results, dummyresults, settings = isp.getOrganizedResults()

		keywords, options = isp.getKeywordsAndOptions()

		output_field = options.get('out', 'yammer_user_full_name')
		user_id_fld = options.get('field', 'sender_id')

		#userid = argvals.get("id")

		if results:
			for result in results:
				userid = result[user_id_fld]
				if userid:
					#user = yammer.users.find(userid)
					result[str(output_field)] = "test"
					#user.full_name
		else:
			result={}
			#user = yammer.users.find(userid)
			#result[str(user_name)] = user.full_name
			#results.append(result)

		splunk.Intersplunk.outputResults(results)

	except Exception, e:
		import traceback
		stack =  traceback.format_exc()
		splunk.Intersplunk.generateErrorResults(str(e))
Example #16
def main():
    if len(sys.argv) < 3:
        usage()
        
    tname = sys.argv[1]
    #log("args")
    #for v in sys.argv:
    #    log(v)

    options = ["max_terms", "use_disjunct", "eventsonly"]
    srchargs = []
    log("ARGS: %s" % sys.argv[2:])
    for arg in sys.argv[2:]:
        for option in options:
            if arg.startswith(option):
                break
        else:
            srchargs.append(arg)
    if len(srchargs) == 0:
        usage()

    tsearch = ' '.join(srchargs)
    log("SEARCH: %s" % tsearch)
        
    results,dummyresults,settings = si.getOrganizedResults()
    results = [] # we don't care about incoming results

    ########TEST#####################
    if 'sessionKey' not in settings:
        settings['owner']      = 'admin'
        settings['password']   = '******'
        settings['namespace']  = 'search'
        settings['sessionKey'] = splunk.auth.getSessionKey('admin', 'changeme')
    ########TEST####################
    kwargs = {}
    for f in ['owner','namespace','sessionKey','hostPath']:
        if f in settings:
            kwargs[f] = settings[f]

    messages = {}
    try:
        maxTerms = int(settings.get("max_terms", MAX_SEARCH_COMPLEXITY))
        if maxTerms > MAX_SEARCH_COMPLEXITY or maxTerms < 1:
            si.addWarnMessage(messages, "max_terms must be between 1 and %s.  Using default." % MAX_SEARCH_COMPLEXITY)
            maxTerms = MAX_SEARCH_COMPLEXITY
    except Exception, e:
        maxTerms = MAX_SEARCH_COMPLEXITY
Example #17
def main():
    p = pdns.PDNS()

    # Parse arguments from splunk search
    opts, kwargs = Intersplunk.getKeywordsAndOptions()
    limit = int(kwargs.get("limit", 25))

    events, _, _ = Intersplunk.getOrganizedResults()

    # Annotate events
    for event in events:
        value = []
        for field in opts:
            if event.get(field):
                value.append(event[field])

        if not value:
            continue

        query = {}
        answer = {}
        for val in value:
            try:
                for res in p.query(val, limit=limit):
                    if res["query"] != value:
                        query[res["query"]] = True
                    if res["answer"] != value:
                        answer[res["answer"]] = True
            except pdns.connectionError as e:
                Intersplunk.parseError(str(e))
                return
            except pdns.resourceLimitExceeded as e:
                Intersplunk.parseError(str(e))
                return

        if query:
            if "query" not in event:
                event["query"] = query.keys()

        if answer:
            if "answer" not in event:
                event["answer"] = answer.keys()

    Intersplunk.outputResults(events)
Example #18
File: urlhaus.py Project: lin0x/osweep
def main():
    """ """
    lookup_path = '/opt/splunk/etc/apps/osweep/lookups'
    file_path   = '{}/urlhaus_url_feed.csv'.format(lookup_path)

    if sys.argv[1].lower() == 'feed':
        data_feed = urlhaus.get_feed()
        urlhaus.write_file(data_feed, file_path)
        exit(0)

    try:
        results, dummy_results, settings = InterSplunk.getOrganizedResults()
        
        if isinstance(results, list) and len(results) > 0:
            new_results = process_master(results)
        elif len(sys.argv) > 1:
            new_results = process_master(None)
    except:
        stack = traceback.format_exc()
        new_results = InterSplunk.generateErrorResults("Error: " + str(stack))

    InterSplunk.outputResults(new_results)
    return
Example #19
def main():
    try:
        search_results, dummyresults, settings = intersplunk.getOrganizedResults(
        )
        output_fields = ['_time', '_span']
        output_results = []
        if search_results is None or len(search_results) == 0:
            intersplunk.outputResults(output_results, fields=output_fields)
        else:
            fields = search_results[0].keys()
            detected_fields = list(
                filter(lambda x: x != '_time' and x != '_span', fields))
            search_results_length = range(len(search_results))
            timestamp = [
                int(str(search_results[i]['_time']))
                for i in search_results_length
            ]
            output_results = [{
                '_time': timestamp[i],
                '_span': search_results[i]['_span']
            } for i in search_results_length]
            for cur_field in detected_fields:
                data = [
                    str(search_results[i][cur_field])
                    for i in search_results_length
                ]
                if preprocess(data, timestamp, search_results_length,
                              output_results, cur_field):
                    output_fields.append(cur_field)

            intersplunk.outputResults(output_results, fields=output_fields)
    except:
        stack = traceback.format_exc()
        results = intersplunk.generateErrorResults("Error : Traceback: " +
                                                   str(stack))
        intersplunk.outputResults(results)
Example #20
            for _t in threads:
                _t.join()
            _iops = sum(results)

            bandwidth = int(blocksize * _iops)
            #print " %sB blocks: %6.1f IO/s, %sB/s (%sbit/s)" % (greek(blocksize), _iops, greek(bandwidth, 1), greek(8*bandwidth, 1, 'si'))
            #print strftime("%Y-%m-%d %H:%M:%S") + " location=%s, capacity=%s, threads=%d, block_size=%s, iops=%s" % (dev, mediasize(dev), num_threads, blocksize, _iops)
            #blocksize *= 2
            runs -= 1
            now = str(int(time.mktime(time.localtime())))

            def hello(results, settings):
                result = {}
                #result['string'] = strftime("%Y-%m-%d %H:%M:%S") + " location=%s, storage_type=%s, file_size_kb=%s, threads=%d, block_size=%s, iops=%s" % (dev, storage_type, file_size_kb, num_threads, blocksize, _iops)
                #results.append({'_time' : now, 'location' : dev, 'run_time_sec' : t, 'storage_type' : storage_type, 'file_size_kb' : file_size_kb, 'threads' : num_threads, 'block_size' : blocksize, 'iops' : _iops})
                results.append({'_time' : now, 'location' : dev, 'run_time_sec' : t, 'threads' : num_threads, 'block_size' : blocksize, 'iops' : _iops})
                return results

            results, dummyresults, settings = si.getOrganizedResults()
            results = hello(results, settings)
            si.outputResults(results)

    except IOError, (err_no, err_str):
        raise SystemExit(err_str)
    except KeyboardInterrupt:
        print "caught ctrl-c, bye."

# eof.


Example #21
File: proc.py Project: DRArpitha/splunk
def execute():
    
    results = []
    try:
        results, dummyresults, settings = si.getOrganizedResults()

        keywords, options = si.getKeywordsAndOptions()
        settings.update(options)

        sessionKey = settings.get("sessionKey", None)
        if TESTING and sessionKey == None:
            sessionKey = auth.getSessionKey('admin', 'changeme')
        owner      = settings.get("owner", None)
        namespace  = settings.get("namespace", "search")
        scriptname = settings.get("script", None)
        prerun_str = settings.get("prerun", "True").lower()
        prerun     = prerun_str.startswith('t') or prerun_str.startswith('y') or prerun_str.startswith('1')

        log("sessionKey %s owner %s namespace %s script %s prerun %s" % (sessionKey, owner, namespace, scriptname, prerun))
        
        if scriptname == None:
            raise Exception('"script" value required')
        if ".." in scriptname or "/" in scriptname or "\\" in scriptname:
            raise Exception('pathname cannot contain "..", "/", or "\\".')
        home = si.splunkHome()
        localpath = os.path.join('etc', 'apps', namespace, 'scripts', scriptname + ".ss")
        pathname = os.path.join(home, localpath)
        if not os.path.exists(pathname):
            raise Exception('script path does not exist: "%s"' % os.path.join("SPLUNK_HOME", localpath))

        log("pathname %s" % (pathname))

        real_stdout = sys.stdout          
        if CAN_STREAM_RESULTS_ANY_TIME:
            # output results immediately to stdout            
            result_stream = sys.stdout  
        else:
            # output results once all done
            result_stream = StringIO.StringIO()

        # capture debugging stdout to StringIO, but have real stdout used for outputting results as streamed
        sys.stdout = StringIO.StringIO()
        
        script = scripting.Script(sessionKey, owner, namespace, path=pathname, prerunfix=prerun, outputstream=result_stream)
        side_effects = script.run()

        
        log("side_effects %s" % (side_effects))

        # output non-results -- variables and print statements from scripts
        sys.stdout.flush()
        messages = {}
        si.addInfoMessage(messages, "Variable values: %s" % side_effects)
        si.addInfoMessage(messages, "Standard output: %s" % sys.stdout.getvalue())

        # reset stdout
        sys.stdout = real_stdout
        OUTPUT_MSGS = True
        if OUTPUT_MSGS:
            # si.outputResults(None, messages)
            for level, messages in messages.items():
                for msg in messages:
                    print "%s=%s" % (level, normalizeMsg(msg))
            print

        # we haven't output results yet.  do it now.
        if not CAN_STREAM_RESULTS_ANY_TIME:
            result_stream.flush()
            print result_stream.getvalue()


    except Exception, e:
        sys.stdout = real_stdout        
        import traceback
        msg = "%s. Traceback: %s" % (e, traceback.format_exc())
        log("error %s" % msg)
        si.generateErrorResults(msg)
Example #22
File: jpath.py Project: Kintyre/jmespath
def jpath():
    try:
        keywords, options = si.getKeywordsAndOptions()
        legacy_args_fixer(options)

        defaultval = options.get('default', None)
        fn_input = options.get('input', options.get('field', '_raw'))
        fn_output = options.get('output', 'jpath')
        if len(keywords) != 1:
            si.generateErrorResults('Requires exactly one path argument.')
            sys.exit(0)
        path = keywords[0]

        # Handle literal (escaped) quotes.  Presumably necessary because of raw args?
        path = path.replace(r'\"', '"')

        if "*" in fn_output:
            apply_output = output_to_wildcard
        else:
            apply_output = output_to_field

        try:
            jp = jmespath.compile(path)
        except ParseError as e:
            # Todo:  Consider stripping off the last line "  ^" pointing to the issue.
            # Not helpful since Splunk wraps the error message in a really ugly way.
            si.generateErrorResults(
                "Invalid JMESPath expression '{}'. {}".format(path, e))
            sys.exit(0)

        results, dummyresults, settings = si.getOrganizedResults()
        # for each results
        for result in results:
            # get field value
            ojson = result.get(fn_input, None)
            added = False
            if ojson is not None:
                if isinstance(ojson, (list, tuple)):
                    # XXX: Add proper support for multivalue input fields.  Just use first value for now
                    ojson = ojson[0]
                try:
                    json_obj = json.loads(ojson)
                except ValueError:
                    # Invalid JSON.  Move on, nothing to see here.
                    continue
                try:
                    values = jp.search(json_obj, options=jp_options)
                    apply_output(values, fn_output, result)
                    result[ERROR_FIELD] = None
                    added = True
                except UnknownFunctionError as e:
                    # Can't detect invalid function names during the compile, but we want to treat
                    # these like syntax errors:  Stop processing immediately
                    si.generateErrorResults(
                        "Issue with JMESPath expression. {}".format(e))
                    sys.exit(0)
                except JMESPathError as e:
                    # Not 100% sure I understand what these errors mean. Should they halt?
                    result[ERROR_FIELD] = "JMESPath error: {}".format(e)
                except Exception as e:
                    result[ERROR_FIELD] = "Exception: {}".format(e)

            if not added and defaultval is not None:
                result[fn_output] = defaultval

        si.outputResults(results)
    except Exception as e:
        import traceback

        stack = traceback.format_exc()
        si.generateErrorResults("Error '%s'. %s" % (e, stack))
Example #23
def execute():

    results = []
    try:
        results, dummyresults, settings = si.getOrganizedResults()

        keywords, options = si.getKeywordsAndOptions()
        settings.update(options)

        sessionKey = settings.get("sessionKey", None)
        if TESTING and sessionKey == None:
            sessionKey = auth.getSessionKey('admin', 'changeme')
        owner = settings.get("owner", None)
        namespace = settings.get("namespace", "search")
        scriptname = settings.get("script", None)
        prerun_str = settings.get("prerun", "True").lower()
        prerun = prerun_str.startswith('t') or prerun_str.startswith(
            'y') or prerun_str.startswith('1')

        log("sessionKey %s owner %s namespace %s script %s prerun %s" %
            (sessionKey, owner, namespace, scriptname, prerun))

        if scriptname == None:
            raise Exception('"script" value required')
        if ".." in scriptname or "/" in scriptname or "\\" in scriptname:
            raise Exception(
                'pathname cannot contain "..", "/", or "\\".')
        home = si.splunkHome()
        localpath = os.path.join('etc', 'apps', namespace, 'scripts',
                                 scriptname + ".ss")
        pathname = os.path.join(home, localpath)
        if not os.path.exists(pathname):
            raise Exception('script path does not exist: "%s"' %
                            os.path.join("SPLUNK_HOME", localpath))

        log("pathname %s" % (pathname))

        real_stdout = sys.stdout
        if CAN_STREAM_RESULTS_ANY_TIME:
            # output results immediately to stdout
            result_stream = sys.stdout
        else:
            # output results once all done
            result_stream = StringIO.StringIO()

        # capture debugging stdout to StringIO, but have real stdout used for outputting results as streamed
        sys.stdout = StringIO.StringIO()

        script = scripting.Script(sessionKey,
                                  owner,
                                  namespace,
                                  path=pathname,
                                  prerunfix=prerun,
                                  outputstream=result_stream)
        side_effects = script.run()

        log("side_effects %s" % (side_effects))

        # output non-results -- variables and print statements from scripts
        sys.stdout.flush()
        messages = {}
        si.addInfoMessage(messages, "Variable values: %s" % side_effects)
        si.addInfoMessage(messages,
                          "Standard output: %s" % sys.stdout.getvalue())

        # reset stdout
        sys.stdout = real_stdout
        OUTPUT_MSGS = True
        if OUTPUT_MSGS:
            # si.outputResults(None, messages)
            for level, messages in messages.items():
                for msg in messages:
                    print "%s=%s" % (level, normalizeMsg(msg))
            print

        # we haven't output results yet.  do it now.
        if not CAN_STREAM_RESULTS_ANY_TIME:
            result_stream.flush()
            print result_stream.getvalue()

    except Exception, e:
        sys.stdout = real_stdout
        import traceback
        msg = "%s. Traceback: %s" % (e, traceback.format_exc())
        log("error %s" % msg)
        si.generateErrorResults(msg)
Example #24

if __name__ == '__main__':
    try:
        keywords, options = si.getKeywordsAndOptions()
        defaultval = options.get('default', None)
        field = options.get('field', '_raw')
        outfield = options.get('outfield', 'xpath')
        if len(keywords) != 1:
            si.generateErrorResults('Requires exactly one path argument.')
            exit(0)
        path = keywords[0]
        # Support for searching with absolute path
        if len(path) > 1 and path[0] == '/' and path[1] != '/':
            path = '/data' + path
        results, dummyresults, settings = si.getOrganizedResults()
        # for each results
        for result in results:
            # get field value
            myxml = result.get(field, None)
            added = False
            if myxml != None:
                # make event value valid xml
                myxml = "<data>%s</data>" % myxml
                try:
                    et = etree.parse(StringIO.StringIO(myxml))
                    nodes = et.xpath(path)
                    values = [tostr(node) for node in nodes]
                    result[outfield] = values
                    added = True
                except Exception, e:
Example #25
def main():
    if len(sys.argv) < 3:
        usage()

    tname = sys.argv[1]
    #log("args")
    #for v in sys.argv:
    #    log(v)

    options = ["max_terms", "use_disjunct", "eventsonly"]
    srchargs = []
    log("ARGS: %s" % sys.argv[2:])
    for arg in sys.argv[2:]:
        for option in options:
            if arg.startswith(option):
                break
        else:
            srchargs.append(arg)
    if len(srchargs) == 0:
        usage()

    tsearch = ' '.join(srchargs)
    log("SEARCH: %s" % tsearch)

    results, dummyresults, settings = si.getOrganizedResults()
    results = []  # we don't care about incoming results

    ########TEST#####################
    if 'sessionKey' not in settings:
        settings['owner'] = 'admin'
        settings['password'] = '******'
        settings['namespace'] = 'search'
        settings['sessionKey'] = splunk.auth.getSessionKey('admin', 'changeme')
    ########TEST####################
    kwargs = {}
    for f in ['owner', 'namespace', 'sessionKey', 'hostPath']:
        if f in settings:
            kwargs[f] = settings[f]

    messages = {}
    try:
        maxTerms = int(settings.get("max_terms", MAX_SEARCH_COMPLEXITY))
        if maxTerms > MAX_SEARCH_COMPLEXITY or maxTerms < 1:
            si.addWarnMessage(
                messages,
                "max_terms must be between 1 and %s.  Using default." %
                MAX_SEARCH_COMPLEXITY)
            maxTerms = MAX_SEARCH_COMPLEXITY
    except Exception as e:
        maxTerms = MAX_SEARCH_COMPLEXITY

    dummy, options = si.getKeywordsAndOptions()
    makeORs = isTrue(options.get("use_disjunct", "t"))
    eventsOnly = isTrue(options.get("eventsonly", "f"))

    log("MAXTERMS: %s MAKEORS: %s eventsOnly: %s" %
        (maxTerms, makeORs, eventsOnly))
    log("tsearch: %s" % tsearch)

    results = []
    try:
        results = findTransaction(tname, tsearch, makeORs, eventsOnly,
                                  maxTerms, messages, **kwargs)
    except Exception as e:
        error(e)

    events = []
    log("RESULTS: %s" % len(results))
    for result in results:  # api fail
        event = {}
        for field in result:
            if field == '_time':
                event['_time'] = util.dt2epoch(
                    util.parseISO(str(result['_time'])))
            else:
                event[field] = result[field]
        events.append(event)

    si.outputResults(events, messages)
Example #26
from addon.sync_task import SyncTask
import utils.app_util as util

logger = util.get_logger()

# results of spl
results = []

SYNC_ACCOUNTS_TASK = 'sync_accounts'
SYNC_MACROS_TASK = 'sync_macros'

ALL_TASKS = set([SYNC_ACCOUNTS_TASK, SYNC_MACROS_TASK])


try:
    results,dummyresults,settings = intersplunk.getOrganizedResults()
    session_key = settings['sessionKey']

    tasks = set()

    if len(sys.argv) == 1:
        tasks = ALL_TASKS
    else:
        for task in sys.argv:
            task = task.lower()
            if task in ALL_TASKS:
                tasks.add(task)

    sync_task = SyncTask(session_key)

    # 1. sync accounts
Example #27
def main():
    try:
        search_results, dummyresults, settings = intersplunk.getOrganizedResults(
        )
        session_key = settings['sessionKey']
        if len(sys.argv) == 2:
            # update aws price info
            if sys.argv[1] == 'info':
                task = AwsInfoTask(session_key)
                task.execute()
        elif len(sys.argv) == 5:
            # obtain price detail
            region = sys.argv[1]
            instance_type = sys.argv[2]
            product_os = sys.argv[3]
            tenancy = sys.argv[4]
            on_demand_hourly, reserved_one_all_yearly, reserved_one_partial_yearly, reserved_one_partial_hourly, reserved_one_no_hourly, currency = read_price(
                region, instance_type, product_os, tenancy, session_key)

            intersplunk.outputResults([{
                PRICE_ON_DEMAND_HOURLY: on_demand_hourly,
                PRICE_RESERVED_ONE_ALL_YEARLY: reserved_one_all_yearly,
                PRICE_RESERVED_ONE_PARTIAL_YEARLY: reserved_one_partial_yearly,
                PRICE_RESERVED_ONE_PARTIAL_HOURLY: reserved_one_partial_hourly,
                PRICE_RESERVED_ONE_NO_HOURLY: reserved_one_no_hourly,
                CURRENCY: currency
            }],
                                      fields=[
                                          PRICE_ON_DEMAND_HOURLY,
                                          PRICE_RESERVED_ONE_ALL_YEARLY,
                                          PRICE_RESERVED_ONE_PARTIAL_YEARLY,
                                          PRICE_RESERVED_ONE_PARTIAL_HOURLY,
                                          PRICE_RESERVED_ONE_NO_HOURLY,
                                          CURRENCY
                                      ])
        elif len(sys.argv) == 7:
            # calculate optimal RI, RI cost and on demand cost
            base = sys.argv[1]
            region = sys.argv[2]
            instance_type = sys.argv[3]
            purchase_option = sys.argv[4]
            product_os = sys.argv[5]
            tenancy = sys.argv[6]

            valid_days, message = get_valid_days_from_conf(session_key)
            if valid_days < 0:
                ri = 'N/A'
                ri_cost = 'N/A'
                instance_hours = []
                on_demand_hourly = 0
                currency = '$' if re.match(
                    r'cn-.*', region) == None else '\xc2\xa5'.decode('utf8')
            else:
                history_len, instance_hours = get_instance_hours(
                    base, search_results)
                # read price
                on_demand_hourly, reserved_one_all_yearly, reserved_one_partial_yearly, reserved_one_partial_hourly, reserved_one_no_hourly, currency = read_price(
                    region, instance_type, product_os, tenancy, session_key)

                if valid_days * HOURS_OF_DAY > history_len:
                    ri = 'N/A'
                    ri_cost = 'N/A'
                    message = 'It\'s required to have %d days\' data at least. You can update the setting in recommendation.conf' % (
                        valid_days)
                else:
                    if purchase_option == 'all':
                        ri, ri_cost, message = ri_wrap(
                            instance_hours, on_demand_hourly,
                            reserved_one_all_yearly / HOURS_OF_YEAR)
                    elif purchase_option == 'partial':
                        ri, ri_cost, message = ri_wrap(
                            instance_hours, on_demand_hourly,
                            reserved_one_partial_yearly / HOURS_OF_YEAR +
                            reserved_one_partial_hourly)
                    else:
                        ri, ri_cost, message = ri_wrap(instance_hours,
                                                       on_demand_hourly,
                                                       reserved_one_no_hourly)

            instance_hours_len = max(1, len(instance_hours))
            outputResults = []
            cur_line = {}
            cur_line[ON_DEMAND_COST] = int(
                round(on_demand_hourly * sum(instance_hours) /
                      instance_hours_len * HOURS_OF_YEAR))  # on demand cost
            cur_line[RI] = ri
            cur_line[RI_COST] = 'N/A' if ri_cost == 'N/A' else int(
                round(ri_cost / instance_hours_len * HOURS_OF_YEAR))  # RI cost
            cur_line[MESSAGE] = message
            cur_line[CURRENCY] = currency
            outputResults.append(cur_line)
            intersplunk.outputResults(
                outputResults,
                fields=[RI, RI_COST, ON_DEMAND_COST, MESSAGE, CURRENCY])
        else:
            intersplunk.parseError(
                "Arguments should be recommendation base, AZ, instance type, purchase option, os and tenancy."
            )
    except:
        stack = traceback.format_exc()
        results = intersplunk.generateErrorResults("Error : Traceback: " +
                                                   str(stack))
        intersplunk.outputResults(results)
Example #28
MINUTE = 60
HOUR = 60 * MINUTE
DAY = 24 * HOUR
MONTH = 30 * DAY
YEAR = 12 * MONTH

# handle plurals nicely
def unitval(unit, val):
    plural = ""
    if val >= 2: plural = "s"
    return "%s %s%s ago" % (int(val), unit, plural)

if __name__ == '__main__':
    try:
        keywords,options = si.getKeywordsAndOptions()
        results,dumb1, dumb2 = si.getOrganizedResults()

        now = time.time()
        # for each result
        for result in results:
            utc = result.get('_time', None)
            if utc == None:
                reltime = "unknown"
            else:
                diff = int(now - float(utc))
                if diff < -60:
                    reltime = "future"
                elif diff < 0: # handle weird case of client clock off slightly
                    reltime = "now"
                elif diff == 0:
                    reltime = "now"
Example #29
MINUTE = 60
HOUR = 60 * MINUTE
DAY = 24 * HOUR
MONTH = 30 * DAY
YEAR = 12 * MONTH

# handle plurals nicely
def unitval(unit, val):
    plural = ""
    if val >= 2: plural = "s"
    return "%s %s%s ago" % (int(val), unit, plural)

if __name__ == '__main__':
    try:
        keywords,options = si.getKeywordsAndOptions()
        results,dumb1, dumb2 = si.getOrganizedResults()

        now = time.time()
        # for each result
        for result in results:
            utc = result.get('_time', None)
            if isinstance(utc, list):
                reltime = "unknown"
            elif utc == None:
                reltime = "unknown"
            else:
                diff = int(now - float(utc))
                if diff < -60:
                    reltime = "future"
                elif diff < 0: # handle weird case of client clock off slightly
                    reltime = "now"
Example #30
def execute():
    results = []
    try:
        results, dummyresults, settings = si.getOrganizedResults()

        # default values
        args = {'namespace': 'search'}
        # get commandline args
        keywords, options = si.getKeywordsAndOptions()
        # override default args with settings from search kernel
        args.update(settings)
        # override default args with commandline args
        args.update(options)

        sessionKey = args.get("sessionKey", None)
        owner = args.get("owner", 'admin')
        namespace = args.get("namespace", None)

        if namespace.lower() == "none":
            namespace = None

        messages = {}

        if sessionKey == None:
            # this shouldn't happen, but it's useful for testing.
            try:
                sessionKey = sa.getSessionKey('admin', 'changeme')
                si.addWarnMessage(
                    messages,
                    "No session given to 'tune' command. Using default admin account and password."
                )
            except splunk.AuthenticationFailed, e:
                si.addErrorMessage(messages,
                                   "No session given to 'tune' command.")
                return

        if len(keywords) != 1:
            usage()

        # e.g., '/data/inputs/monitor'
        entity = keywords[0]
        logger.info("Entity: %s Args: %s" % (entity, args))

        results = []  # we don't care about incoming results
        try:
            entitys = en.getEntities(entity,
                                     sessionKey=sessionKey,
                                     owner=owner,
                                     namespace=namespace,
                                     count=-1)
            for name, entity in entitys.items():
                try:
                    myapp = entity["eai:acl"]["app"]
                    if namespace != None and myapp != namespace:
                        continue
                except:
                    continue  # if no eai:acl/app, filter out
                result = entityToResult(name, entity)
                results.append(result)
        except splunk.ResourceNotFound, e2:
            pass