Example #1
    def main(self):
        logger.error("[---] Splunk Debug printing isp Inspect Results: {}".format(inspect.stack()[1]))
        results, dummyresults, self.settings = isp.getOrganizedResults()
        self.keywords, self.argvals = isp.getKeywordsAndOptions()
        logger.error("[---] Splunk Debug splunklib results: {}".format(self._metadata))
        # in Splunk pre 5.0 we don't get the info, so we just read it from its standard location
        infoPath = self.settings.get('infoPath', '')
        logger.error("[---] Splunk Debug printing isp stuff inside hsc: {}".format(isp.getOrganizedResults()))
        logger.error("[---] Splunk Debug printing isp keywords and argvals inside hsc: {}".format(isp.getKeywordsAndOptions()))
        if len(infoPath) == 0:
            infoPath = os.path.join(getDispatchDir(self.settings.get('sid'), self.settings.get('sharedStorage', None)), 'info.csv')
        self.info.readFrom(infoPath)

        self.raiseAll = splunk.util.normalizeBoolean(unquote(self.argvals.get('raiseall', 'f')))
        self.sessionKey = self.settings.get('sessionKey', None)
        self.owner = self.settings.get('owner', None)
        self.namespace = self.settings.get('namespace', None)
        self.krb5_principal = unquote(self.argvals.get('kerberos_principal', '')).strip()

        if len(self.krb5_principal) == 0:
            self.krb5_principal = None
        HadoopEnvManager.init(APP_NAME, 'nobody', self.sessionKey, self.krb5_principal)

        self._main_impl()
Example #2
def getArgs():
    fields, argvals = si.getKeywordsAndOptions()
    url = argvals.get("url", "null")
    username = argvals.get("username", "null")
    if url == 'null':
        url = "https://api.github.com/feeds"
    return url, username
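
For orientation, here is a minimal sketch of the keyword/option split that getKeywordsAndOptions() performs in these examples. This is a hypothetical re-implementation for illustration only; the real Intersplunk parser also handles quoting and escaping:

def split_keywords_and_options(argv):
    # Hypothetical stand-in for splunk.Intersplunk.getKeywordsAndOptions():
    # tokens of the form key=value become options, everything else a keyword.
    keywords, options = [], {}
    for token in argv:
        if '=' in token:
            key, _, value = token.partition('=')
            options[key] = value
        else:
            keywords.append(token)
    return keywords, options

# split_keywords_and_options(['username', 'url=https://api.github.com'])
# -> (['username'], {'url': 'https://api.github.com'})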
Example #3
def main():
    client = actconfig.setup()

    # Parse arguments
    opts, kwargs = Intersplunk.getKeywordsAndOptions()

    if not opts:
        Intersplunk.generateErrorResult(
            "Usage: | actadd <field1> ... <fieldN> [fact_type=<fact type>] [fact_value=<fact value>]"
        )
        return

    events, _, _ = Intersplunk.getOrganizedResults()

    # Annotate events
    for event in events:
        object_value = []
        for field in opts:
            if event.get(field):
                object_value.append(event[field])

        if not object_value:
            continue

        event.update(fact_search(client, object_value, **kwargs))

    Intersplunk.outputResults(events)
Example #4
    def main(self):
        results, dummyresults, self.settings = isp.getOrganizedResults()
        self.keywords, self.argvals = isp.getKeywordsAndOptions()
        logger.info('keywords:' + str(self.keywords))

        # in Splunk pre 5.0 we don't get the info, so we just read it from its standard location
        infoPath = self.settings.get('infoPath', '')
        if len(infoPath) == 0:
            infoPath = os.path.join(
                getDispatchDir(self.settings.get('sid'),
                               self.settings.get('sharedStorage', None)),
                'info.csv')
        self.info.readFrom(infoPath)

        self.raiseAll = splunk.util.normalizeBoolean(
            unquote(self.argvals.get('raiseall', 'f')))
        self.sessionKey = self.settings.get('sessionKey', None)
        self.owner = self.settings.get('owner', None)
        self.namespace = self.settings.get('namespace', None)
        self.krb5_principal = unquote(
            self.argvals.get('kerberos_principal', '')).strip()

        if len(self.krb5_principal) == 0:
            self.krb5_principal = None
        HadoopEnvManager.init(APP_NAME, 'nobody', self.sessionKey,
                              self.krb5_principal)

        self._main_impl()
Example #5
def parse_pstacks():

    results = []
    keywords, options = si.getKeywordsAndOptions()

    separator = options.get('separator', DEFAULT_SEPARATOR)
    fileorderindex = int(options.get('fileorderindex', DEFAULT_FOI))
    timeorderindex = int(options.get('timeorderindex', DEFAULT_TSI))

    if len(keywords)==0:
        raise Exception("requires path to pstack file(s)")

    gpath = keywords.pop(0)
    gpath = gpath.replace("\\\\", "\\")
    gpath = gpath.replace('\[', '[')
    gpath = gpath.replace('\]', ']')
    # find all files matching
    complete_path = os.path.expanduser(
        os.path.expandvars(gpath))
    glob_matches = glob.glob(complete_path)
    logger.error("complete path: %s" % complete_path)
    logger.error("glob matches: %s" % glob_matches)

    if len(glob_matches)==0:
        logger.error("No file matching %s" % complete_path)
        raise Exception("No files matching %s." % complete_path)


    for pfile in glob_matches:
        logger.error("parsing file: %s" % pfile)
        results += parse_pstack_file(pfile, separator, fileorderindex, timeorderindex)


    return results
Example #6
def execute():
    try:
        keywords, argvals = isp.getKeywordsAndOptions()
        results, dummyresults, settings = isp.getOrganizedResults()
        sessionKey = settings.get('sessionKey')

        if sessionKey == None:
            return vixutils.generateErrorResults(
                'sessionKey not passed to the search command, something\'s very wrong!'
            )

        #check that the command is being executed by the scheduler
        sid = settings.get('sid')
        if not sid.startswith('scheduler_') and not argvals.get(
                'forcerun', '') == '1':
            return vixutils.generateErrorResults(
                'rollercontroller is supposed to be run by the scheduler, add forcerun=1 to force execution'
            )

        # check if error messaging is disabled
        global ERRMSGS_ENABLED
        ERRMSGS_ENABLED = 'disablemsgs' not in keywords

        providers = erp_launcher.listProviders(sessionKey)
        rollVixes = erp_launcher.listVixes(
            sessionKey, 'disabled=0 AND vix.output.buckets.from.indexes=*')
        rollProviders = filterRollProviders(rollVixes, providers)
        searchString = genSearchString(rollVixes, rollProviders)

        kwargs = {}
        for k in ['owner', 'namespace', 'sessionKey', 'hostPath']:
            if k in settings:
                kwargs[k] = settings[k]

        if not os.path.exists(vixutils.getAppBinJars()):
            # first time we're copying jars, force bundle replication
            kwargs['force_bundle_replication'] = 1

        prepareSearchExecution()

        numRetries = argvals.get("retries", 1)

        for i in range(0, int(numRetries)):
            logger.info("Dispatching the search: %s" % searchString)
            search = splunk.search.dispatch(searchString, **kwargs)
            try:
                streamSearch(search, sessionKey)
            finally:
                cancelSearch(search)

    except Exception as e:
        import traceback
        splunkio.write([{
            "stack": traceback.format_exc(),
            "exception": str(e)
        }])
    finally:
        sys.stdout.flush()
Example #7
def execute():
    results = []
    try:
        results, dummyresults, settings = si.getOrganizedResults()

        # default values
        args = {"namespace": "search"}
        # get commandline args
        keywords, options = si.getKeywordsAndOptions()
        # override default args with settings from search kernel
        args.update(settings)
        # override default args with commandline args
        args.update(options)

        sessionKey = args.get("sessionKey", None)
        owner = args.get("owner", "admin")
        namespace = args.get("namespace", None)

        if namespace.lower() == "none":
            namespace = None

        messages = {}

        if sessionKey == None:
            # this shouldn't happen, but it's useful for testing.
            try:
                sessionKey = sa.getSessionKey("admin", "changeme")
                si.addWarnMessage(
                    messages, "No session given to 'tune' command. Using default admin account and password."
                )
            except splunk.AuthenticationFailed, e:
                si.addErrorMessage(messages, "No session given to 'tune' command.")
                return

        if len(keywords) != 1:
            usage()

        # e.g., '/data/inputs/monitor'
        entity = keywords[0]
        logger.info("Entity: %s Args: %s" % (entity, args))

        results = []  # we don't care about incoming results
        try:
            entitys = en.getEntities(entity, sessionKey=sessionKey, owner=owner, namespace=namespace, count=-1)
            for name, entity in entitys.items():
                try:
                    myapp = entity["eai:acl"]["app"]
                    if namespace != None and myapp != namespace:
                        continue
                except:
                    continue  # if no eai:acl/app, filter out
                result = entityToResult(name, entity)
                results.append(result)
        except splunk.ResourceNotFound, e2:
            pass
Example #8
def main():
    client = actconfig.setup()

    # Parse arguments from splunk search
    opts, kwargs = Intersplunk.getKeywordsAndOptions()

    results = []

    if opts and "keywords" not in kwargs:
        kwargs["keywords"] = " ".join(opts)

    results += fact_search(client, **kwargs)
    Intersplunk.outputResults(results)
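
A usage note on the example above (the command name actsearch is an assumption for illustration):

# | actsearch ransomware            -> opts=['ransomware'], kwargs={}
# | actsearch keywords=ransomware   -> opts=[],             kwargs={'keywords': 'ransomware'}
# Either way, fact_search() receives kwargs['keywords'] == 'ransomware'.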
Example #9
def main():
  try:    
    messages = {}

    keywords,options = si.getKeywordsAndOptions()
    DEFAULT_MAX_TYPES = 10
    maxtypes = options.get('max', str(DEFAULT_MAX_TYPES))

    error = None
    if not maxtypes.isdigit():
        error = 'max must be an integer between 1-%s.' % MAXRESULTS
    else:
        maxtypes = int(maxtypes)
        if not (0 < maxtypes <= MAXRESULTS):
            error = 'max must be an integer between 1-%s.' % MAXRESULTS
    if error:
      si.generateErrorResults(error)
      return

    ignore_covered = 'notcovered' in keywords
    useraw         = 'useraw' in keywords
      
    results,dummyresults,settings = si.getOrganizedResults()
    #for r in results:
    #  for attr in r:
    #     print attr, r[attr], len(r[attr])
    if len(results) > MAXRESULTS:
      results = results[:MAXRESULTS]
      si.addWarnMessage(messages, "For performance reasons, the maximum number of results used to discover event types was capped at %s. Consider a more restrictive search." % MAXRESULTS)

    argc = len(sys.argv)
    argv = sys.argv

    sessionKey  = settings.get("sessionKey", None)
    owner       = settings.get("owner", None)
    namespace   = settings.get("namespace", None)

    searchhead = ''
    try:
      searches = sutils.getCommands(settings.get("search", ''), None)
      firstcmd = searches[0][0][0]
      firstarg = searches[0][0][1].strip()
      if firstcmd == 'search' and firstarg != '*':
        searchhead = firstarg
    except Exception, e:
      pass
    
    results = discover(results, searchhead, maxtypes, ignore_covered, useraw)

    if len(results) == 0:
      si.addWarnMessage(messages, "Unable to isolate useful groups of events.")
Example #10
def yamuser():
	try:

#		logger = dcu.getLogger()
#		logger.info("Starting the yamuser command")

		# Get configuration values from jira.conf
		splunk_conf = yammercommon.getSplunkConf()
		
#		logger.root.setLevel(logging.DEBUG)

		local_conf = yammercommon.getLocalConf()

		access_token = local_conf.get('yammercommon', 'access_token')

#		logger.debug("Access Token %s" % access_token)

		yammer = yampy.Yammer(access_token=access_token)

		results, dummyresults, settings = isp.getOrganizedResults()

		keywords, options = isp.getKeywordsAndOptions()

		output_field = options.get('out', 'yammer_user_full_name')
		user_id_fld = options.get('field', 'sender_id')

		#userid = argvals.get("id")

		if results:
			for result in results:
				userid = result[user_id_fld]
				if userid:
					#user = yammer.users.find(userid)
					result[str(output_field)] = "test"
					#user.full_name
		else:
			result={}
			#user = yammer.users.find(userid)
			#result[str(user_name)] = user.full_name
			#results.append(result)

		splunk.Intersplunk.outputResults(results)

	except Exception, e:
		import traceback
		stack =  traceback.format_exc()
		splunk.Intersplunk.generateErrorResults(str(e))
Example #11
def main():
    p = pdns.PDNS()

    # Parse arguments from splunk search
    opts, kwargs = Intersplunk.getKeywordsAndOptions()
    limit = int(kwargs.get("limit", 25))

    events, _, _ = Intersplunk.getOrganizedResults()

    # Annotate events
    for event in events:
        value = []
        for field in opts:
            if event.get(field):
                value.append(event[field])

        if not value:
            continue

        query = {}
        answer = {}
        for val in value:
            try:
                for res in p.query(val, limit=limit):
                    if res["query"] != val:
                        query[res["query"]] = True
                    if res["answer"] != val:
                        answer[res["answer"]] = True
            except pdns.connectionError as e:
                Intersplunk.parseError(str(e))
                return
            except pdns.resourceLimitExceeded as e:
                Intersplunk.parseError(str(e))
                return

        if query:
            if "query" not in event:
                event["query"] = query.keys()

        if answer:
            if "answer" not in event:
                event["answer"] = answer.keys()

    Intersplunk.outputResults(events)
Example #12
def raw_pstack():

    results = []
    keywords, options = si.getKeywordsAndOptions()

    separator = options.get('separator', DEFAULT_SEPARATOR)
    fileorderindex = int(options.get('fileorderindex', DEFAULT_FOI))
    thread_id = options.get('threadid', DEFAULT_THREADID)
    reverse = options.get('reverse', DEFAULT_REVERSE)
    timeorderindex = int(options.get('timeorderindex', DEFAULT_TSI))


    if len(keywords)==0:
        raise Exception("requires path to pstack file(s)")


    gpath = keywords.pop(0).strip()
    logger.error("b4 gpath = %s" % gpath)
    gpath = gpath.replace("\\\\", "\\")
    gpath = gpath.replace("\[", "[")
    gpath = gpath.replace("\]", "]")
    logger.error("gpath = %s" % gpath)
    # find all files matching
    complete_path = os.path.expanduser(
        os.path.expandvars(gpath))
    glob_matches = glob.glob(complete_path)
    logger.debug("complete path: %s" % complete_path)
    logger.debug("glob matches: %s" % glob_matches)

    if len(glob_matches)==0:
        logger.error("No file matching %s" % complete_path)
        raise Exception("No files matching %s." % complete_path)


    for pfile in glob_matches:
        logger.error("parsing file: %s" % pfile)
        results += parse_raw_pstack(pfile, thread_id, reverse, separator, fileorderindex, timeorderindex)


    return results
Example #13
def main():
    p = pdns.PDNS()

    # Parse arguments from splunk search
    opts, kwargs = Intersplunk.getKeywordsAndOptions()

    # Get limit from kwargs, but default to 25 if not specified
    limit = int(kwargs.get("limit", 25))

    results = []

    for value in opts:
        try:
            result = p.query(value, limit=limit)
        except pdns.connectionError as e:
            Intersplunk.parseError(str(e))
            return
        except pdns.resourceLimitExceeded as e:
            Intersplunk.parseError(str(e))
            return

        results += result
    Intersplunk.outputResults(results)
Example #14
        if f in settings:
            kwargs[f] = settings[f]

    messages = {}
    try:
        maxTerms = int(settings.get("max_terms", MAX_SEARCH_COMPLEXITY))
        if maxTerms > MAX_SEARCH_COMPLEXITY or maxTerms < 1:
            si.addWarnMessage(
                messages,
                "max_terms must be between 1 and %s.  Using default." %
                MAX_SEARCH_COMPLEXITY)
            maxTerms = MAX_SEARCH_COMPLEXITY
    except Exception, e:
        maxTerms = MAX_SEARCH_COMPLEXITY

    dummy, options = si.getKeywordsAndOptions()
    makeORs = isTrue(options.get("use_disjunct", "t"))
    eventsOnly = isTrue(options.get("eventsonly", "f"))

    log("MAXTERMS: %s MAKEORS: %s eventsOnly: %s" %
        (maxTerms, makeORs, eventsOnly))
    log("tsearch: %s" % tsearch)

    results = []
    try:
        results = findTransaction(tname, tsearch, makeORs, eventsOnly,
                                  maxTerms, messages, **kwargs)
    except Exception, e:
        error(e)

    events = []
Example #15
import hashlib
import splunk.Intersplunk as si

if __name__ == '__main__':
    try:
        keywords,options = si.getKeywordsAndOptions()
        if len(keywords) == 0:
            si.generateErrorResults('Requires fields list.')
            exit(0)
        search = ' '.join(keywords)
        results,dummyresults,settings = si.getOrganizedResults()

        for result in results:
            eventSignature = '-=XXX=-'.join([result.get(field, '') for field in keywords])
            sigHash = hashlib.md5(eventSignature).hexdigest()
            result['_icon'] = sigHash
        si.outputResults(results)
    except Exception, e:
        import traceback
        stack = traceback.format_exc()
        si.generateErrorResults("Error '%s'. %s" % (e, stack))
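
Note that this example is Python 2; on Python 3, hashlib.md5() rejects str and requires bytes. A minimal adaptation of the hashing step, assuming UTF-8 field values:

        for result in results:
            eventSignature = '-=XXX=-'.join([result.get(field, '') for field in keywords])
            # encode before hashing so this also runs on Python 3
            sigHash = hashlib.md5(eventSignature.encode('utf-8')).hexdigest()
            result['_icon'] = sigHash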


Example #16
        rex = "^(?:t|true|1|yes)$"

    if (rex is None and arg in argvals) or (arg in argvals
                                            and re.match(rex, argvals[arg])):
        result = True
    return result


if __name__ == '__main__':
    logger = setup_logging()
    logger.info('starting..')
    eStart = time.time()
    try:

        results = si.readResults(None, None, False)
        keywords, argvals = si.getKeywordsAndOptions()
        validate_args(keywords, argvals)

        if results is not None and len(results) > 0:
            argvals = make_arg_sub_based_results(argvals, results)

        # if api_key argument is passed to command, use it instead of default
        if arg_on_and_enabled(argvals, "auth_token"):
            QUANDL_AUTH_TOKEN = argvals["auth_token"]
            logger.debug('setting QUANDL_AUTH_TOKEN="%s"' %
                         str(QUANDL_AUTH_TOKEN))

        if arg_on_and_enabled(argvals, "debug", is_bool=True):
            logger.setLevel(logging.DEBUG)

        quandl_code = keywords[0]
Example #17
def execute():
    
    results = []
    try:
        results, dummyresults, settings = si.getOrganizedResults()

        keywords, options = si.getKeywordsAndOptions()
        settings.update(options)

        sessionKey = settings.get("sessionKey", None)
        if TESTING and sessionKey == None:
            sessionKey = auth.getSessionKey('admin', 'changeme')
        owner      = settings.get("owner", None)
        namespace  = settings.get("namespace", "search")
        scriptname = settings.get("script", None)
        prerun_str = settings.get("prerun", "True").lower()
        prerun     = prerun_str.startswith('t') or prerun_str.startswith('y') or prerun_str.startswith('1')

        log("sessionKey %s owner %s namespace %s script %s prerun %s" % (sessionKey, owner, namespace, scriptname, prerun))
        
        if scriptname == None:
            raise Exception('"script" value required')
        if ".." in scriptname or "/" in scriptname or "\\" in scriptname:
            raise Exception('pathname cannot contain "..", "/", or "\\".')
        home = si.splunkHome()
        localpath = os.path.join('etc', 'apps', namespace, 'scripts', scriptname + ".ss")
        pathname = os.path.join(home, localpath)
        if not os.path.exists(pathname):
            raise Exception('script path does not exist: "%s"' % os.path.join("SPLUNK_HOME", localpath))

        log("pathname %s" % (pathname))

        real_stdout = sys.stdout          
        if CAN_STREAM_RESULTS_ANY_TIME:
            # output results immediately to stdout            
            result_stream = sys.stdout  
        else:
            # output results once all done
            result_stream = StringIO.StringIO()

        # capture debugging stdout to StringIO, but have real stdout used for outputting results as streamed
        sys.stdout = StringIO.StringIO()
        
        script = scripting.Script(sessionKey, owner, namespace, path=pathname, prerunfix=prerun, outputstream=result_stream)
        side_effects = script.run()

        
        log("side_effects %s" % (side_effects))

        # output non-results -- variables and print statements from scripts
        sys.stdout.flush()
        messages = {}
        si.addInfoMessage(messages, "Variable values: %s" % side_effects)
        si.addInfoMessage(messages, "Standard output: %s" % sys.stdout.getvalue())

        # reset stdout
        sys.stdout = real_stdout
        OUTPUT_MSGS = True
        if OUTPUT_MSGS:
            # si.outputResults(None, messages)
            for level, messages in messages.items():
                for msg in messages:
                    print "%s=%s" % (level, normalizeMsg(msg))
            print

        # we haven't output results yet.  do it now.
        if not CAN_STREAM_RESULTS_ANY_TIME:
            result_stream.flush()
            print result_stream.getvalue()


    except Exception, e:
        sys.stdout = real_stdout        
        import traceback
        msg = "%s. Traceback: %s" % (e, traceback.format_exc())
        log("error %s" % msg)
        si.generateErrorResults(msg)
Example #18
   # Get configuration values from config.ini
   local_conf = jiracommon.getLocalConf()

   hostname = local_conf.get('jira', 'hostname')
   username = local_conf.get('jira', 'username')
   password = local_conf.get('jira', 'password')
   protocol = local_conf.get('jira', 'soap_protocol')
   port = local_conf.get('jira', 'soap_port')

   url = "%s://%s:%s/rpc/soap/jirasoapservice-v2?wsdl" % (protocol, hostname, port)
   logger.info(url)
   client = Client(url)
   auth = client.service.login(username, password)

   keywords, argvals = isp.getKeywordsAndOptions()

   time_option = argvals.get('time', "now")

   logger.info('argv: ' + str(sys.argv))

   if sys.argv[1] == 'filters':
      filters =  client.service.getFavouriteFilters(auth)

      keys = (('author', None), ('id', None), ('name', None))

      results = []
      for filter in filters:
         row = jiracommon.flatten(filter, keys)
         logger.info(time.time())
         row['host'] = hostname
Example #19
# send splunk results to slack

import prettytable
import ConfigParser
import requests
import json
import os
import sys

import splunk.Intersplunk as sis
(a, kwargs) = sis.getKeywordsAndOptions()
TRUE_VALUES = ['true', '1', 't', 'y', 'yes']


def get_pretty_table(results):
    if results:
        keys = results[0].keys()
    else:
        return ''
    x = prettytable.PrettyTable(keys, padding_width=4)
    for row in results:
        x.add_row([row[k] for k in keys])
    return "```" + x.get_string() + "```"
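
A quick sketch of what get_pretty_table() produces (the sample rows are hypothetical):

# rows = [{'host': 'web-01', 'count': '42'}, {'host': 'web-02', 'count': '7'}]
# get_pretty_table(rows) returns the rendered table wrapped in triple backticks,
# which Slack displays as a fixed-width code block.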


def main():
    # get config from config file
    config = ConfigParser.ConfigParser()

    if os.path.exists(os.path.join('..', 'local', 'slack.conf')):
        config.readfp(open(os.path.join('..', 'local', 'slack.conf')))
Example #20
    def main(self, results, settings, info, suh):
        keywords, argvals = isp.getKeywordsAndOptions()
        sessionKey = settings.get("sessionKey", None)
        owner = settings.get("owner", None)
        namespace = settings.get("namespace", None)

        sid = info.get('_sid', '')
        child_sid = []
        suh.content['status.jobs.psid'] = sid
        logger.error(
            "[---] Splunk Debug printing SID inside main: {}".format(sid))
        self.dispatch_dir = getDispatchDir(sid,
                                           settings.get('sharedStorage', None))
        self.tmp_dir = os.path.join(self.dispatch_dir,
                                    DEFAULT_HDFS_TMP_DIR_NAME)
        if not os.path.exists(self.tmp_dir):
            os.makedirs(self.tmp_dir)

        self.validateRunExport(namespace, owner, sessionKey, argvals, sid)

        name = unquote(argvals['name']).strip()
        ej = self.getExportJob(name, owner, namespace, sessionKey)

        suh.name = ej.name
        suh.namespace = ej.namespace  # use export job's namespace and owner
        suh.owner = ej.owner
        suh.sessionKey = sessionKey

        # initialize hadoop env manager, including secure cluster info
        # the init is lazy and the manager caches hadoop env for better perf
        krb5_principal = unquote(
            argvals.get('kerberos_principal', ej.get('kerberos_principal',
                                                     '')))
        if krb5_principal != None:
            krb5_principal = krb5_principal.strip()
        if krb5_principal != None and len(krb5_principal) == 0:
            krb5_principal = None

        HadoopEnvManager.init(APP_NAME, 'nobody', sessionKey, krb5_principal)

        # for remote export:
        # ensure the hadoop env is setup correctly, this will throw if
        # HADOOP/JAVA_HOME are not set correctly
        # for local export:
        # simple return a copy of os.environ
        HadoopEnvManager.getEnv(ej['uri'], krb5_principal)

        replication = getintarg(argvals, ej, 'replication', 3)

        # build and dispatch search
        search = ej['search']
        dst = getDestination(ej['uri'], ej['base_path'])

        # make dst directory early on
        hj = self.createHadoopCliJob(dst, krb5_principal)
        makeHdfsDir(hj, dst)
        self.cleanup(name, dst, sid, child_sid, replication=replication)
        logger.error("[---] Splunk Debug is Checking for variables")
        min_start_time = 1243832400  # 6/1/2009, ~release date of 4.0

        # go back one minute to account for slight difference in search head & indexer time
        # TODO: make this offset configurable
        now = int(time.time())
        now = now - 60

        # some export job args can be overridden by runexport cmd args
        compress_level = getintarg(argvals, ej, 'compress_level', 2)
        if compress_level < 0 or compress_level > 9:
            raise HcException(
                HCERR0503, {
                    'name':
                    'compress_level',
                    'value':
                    compress_level,
                    'accepted_values':
                    'an integer between 0 and 9, 0 means no compression'
                })
        roll_size = getintarg(argvals, ej, 'roll_size', DEFAULT_ROLL_SIZE)
        maxspan = getintarg(
            argvals, ej, 'maxspan', 365 * 24 * 3600
        )  # do not export more than this time range at once, seconds, 0 == unlimited
        minspan = getintarg(
            argvals, ej, 'minspan',
            600)  # do not export less that this time range at once, seconds
        starttime = getintarg(argvals, ej, 'starttime', min_start_time)
        lt = getintarg(argvals, ej, 'endtime', now)
        jc = unquote(
            argvals.get('parallel_searches', ej.get('parallel_searches', '1')))
        format = unquote(argvals.get('format', ej.get('format', 'raw')))
        fields = unquote(argvals.get('fields', ej.get('fields', '_raw')))

        logger.error("[---] Splunk Debug done with variables")
        # interpret 0s:
        # for lt -> 0 means now,
        # for starttime means latest possible time, ie lt - minspan
        if lt == 0:
            lt = now
        if starttime == 0:
            starttime = lt - minspan - 1

        if starttime < min_start_time:
            starttime = min_start_time

        et = self.getLastCursorTime(name, dst, starttime)
        logger.error("[---] Splunk Debug printing the ET: {}".format(et))
        #TODO: figure out how to handle the first time run
        if maxspan > 0 and et + maxspan < lt:  # clamp lt if maxspan is given
            lt = et + maxspan
        logger.error("Printing et and lt: {} {}".format(et, lt))
        if lt - et < minspan:
            suh.content['status'] = 'done'
            suh.updateStatus()
            logger.error("[---] Splunk Debug Minspan problems")
            raise InfoSearchException(
                "Time range since last successful export is less than minspan. Not starting export job. (et=%d, lt=%d, range=%d, minspan=%d)"
                % (et, lt, int(lt - et), minspan))

        logger.error("[---] Splunk Debug verifying export fields")
        export_fields, required_fields = self.verifyExportFields(
            name, dst, format, fields)
        logger.error("[---] Done verifying")
        index_times = getIndexTimeDisjunction(name, et, lt)

        # check that all enabled peers are Up
        peers = entity.getEntities('search/distributed/peers',
                                   count="-1",
                                   search="disabled=0",
                                   namespace=namespace,
                                   owner=owner,
                                   sessionKey=sessionKey)
        peersDown = []
        peersAll = ['local']

        for pn, po in peers.items():
            if po.get('status', '').lower() != 'up':
                peersDown.append(po.get('peerName', ''))
            peersAll.append(po.get('peerName', ''))

        if len(peersDown) > 0:
            names = ', '.join(peersDown)
            raise HcException(HCERR1505, {
                'peer_count': len(peersDown),
                'peers': names
            })

        #TODO: ensure that no other search is running concurrently

        # create a number of searches depending on jobcount
        basefilename = "%s_%d_%d" % (getExportId(name), et, lt)
        evalSearch = '' if containsEvalSearch(search) else populateEvalSearch(
            ej)
        principal = '' if krb5_principal == None else 'kerberos_principal="' + krb5_principal + '"'
        logger.error("[---] Splunk Debug print the basefilename: {}".format(
            basefilename))
        searches = []

        if jc.lower().strip() == 'max':
            jc = len(peersAll)
        else:
            try:
                jc = int(jc)
            except:
                jc = 1

        useDumpCmd = False
        splunkVersion = None
        splunkMajorVersion = None
        try:
            splunkVersion = settings.get('splunkVersion', '')
            logger.info('splunkVersion:' + str(splunkVersion))
            i = splunkVersion.find(".")
            if i < 0:
                splunkMajorVersion = int(splunkVersion)
            else:
                splunkMajorVersion = int(splunkVersion[:i])
            logger.debug('splunkMajorVersion:' + str(splunkMajorVersion))
            useDumpArg = argvals.get('useDump') != '0'
            useDumpCmd = splunkMajorVersion >= 6 and useDumpArg
        except:
            logger.exception('Failed to parse splunk version:' +
                             str(splunkVersion))
        logger.info('useDumpCmd:' + str(useDumpCmd))
        logger.error('[---] Splunk Debug printing useDumpCmd:' +
                     str(useDumpCmd))
        if jc == 1 or len(peersAll) == 1:
            searches.append(
                buildSearchString('', index_times, search, evalSearch, sid,
                                  basefilename, name, roll_size, dst,
                                  principal, format, export_fields,
                                  required_fields, compress_level, useDumpCmd))
            #log the export jobs parameters
            logger.error(
                '[---] Splunk Debug: export args: starttime=%d, endtime=%d, minspan=%d, maxspan=%d, roll_size=%d, basefilename=%s, format=%s, fields="%s", search="%s", %s'
                % (starttime, lt, minspan, maxspan, roll_size, basefilename,
                   format, export_fields, search, principal))
            logger.info(
                'export args: starttime=%d, endtime=%d, minspan=%d, maxspan=%d, roll_size=%d, format=%s, fields="%s", search="%s", %s'
                % (starttime, lt, minspan, maxspan, roll_size, format,
                   export_fields, search, principal))
        else:
            i = 0
            for pl in splitList(peersAll, jc):
                splunk_servers = ' OR '.join(
                    ['splunk_server="' + ss + '"' for ss in pl])
                bfn = basefilename + '_' + str(i)
                searches.append(
                    buildSearchString('(' + splunk_servers + ')', index_times,
                                      search, evalSearch, sid, bfn, name,
                                      roll_size, dst, principal, format,
                                      export_fields, required_fields,
                                      compress_level, useDumpCmd))
                #log the export jobs parameters
                logger.info(
                    'export args: starttime=%d, endtime=%d, minspan=%d, maxspan=%d, roll_size=%d, format=%s, fields="%s", search="%s", %s'
                    % (starttime, lt, minspan, maxspan, roll_size, format,
                       export_fields, search, principal))
                i += 1

        logger.error(
            "[---] Splunk Debug Searches created: {}".format(searches))
        cpath = self.createCursor(name, lt, dst)

        # spawn the searches
        sj_st = float(time.time())  # search job start time
        sjobs = []
        for search in searches:
            logger.info('search:' + search)
            logger.error('[---] Splunk Debug is printing each search:' +
                         search)
            sj = splunk.search.dispatch(search,
                                        namespace=namespace,
                                        owner=owner,
                                        sessionKey=sessionKey)
            sjobs.append(sj)
            child_sid.append(sj.id)
            info.addInfoMessage('spawned search: sid=%s, search=%s' %
                                (sj.id, search))

        suh.initCommonFields(sjobs, et, lt)
        suh.updateFetchingStatus(sjobs)
        logger.error("Updating fetch status")
        # shut these guys up - too chatty!!!
        for chatty in ['splunk.search', 'splunk.rest']:
            chatty_logger = logging.getLogger(chatty)
            chatty_logger.setLevel(logging.INFO)

        # wait for all the searches to complete, succeeds iff all search jobs complete without errors
        logger.error("[---] Splunk Debug Waiting for searches.")
        msg = self.waitForSearches(sjobs, info, sid, name, suh)
        logger.error(
            "[---] Splunk Debug Waiting for searches is done. {}".format(msg))
        if len(msg) > 0:
            logger.error("Failed while waiting for search to complete:" +
                         str(msg))
            # this raises an exception
            self.cleanup(
                name, dst, sid, child_sid,
                HcException(
                    HCERR1512, {
                        'error_count': len(msg),
                        'child_sids': ','.join(child_sid),
                        'errors': json.dumps(msg)
                    }))

        sj_et = float(time.time())  #search job end time

        suh.updateMovingStatus()

        tmpfiles = self.listTmpFiles(name, dst)
        # write tempfiles to WAL, which lives in HDFS too
        self.writeWAL(name, dst, tmpfiles)

        # at this point we know that we have all the results and they are in tmp files and logged in WAL
        # we increment the cursor's state so that recovery can move any remaining chunks
        cpath = self.incrCursorState(cpath)
        # move files to their final destination
        moved_files = 0
        move_time = 0
        logger.error("[---] Splunk Debug printing cpath: {}".format(cpath))
        logger.error(
            "[---] Splunk Debug Moving tmp files Name: {}".format(name))
        logger.error("[---] Splunk Debug Moving tmp files Dst: {}".format(dst))
        logger.error("[---] Splunk Debug Moving tmp files Suh: {}".format(suh))
        logger.error("[---] Splunk Debug Moving tmp files tmpfiles: {}".format(
            tmpfiles))
        try:
            moved_files, move_time = self.renameTmpFiles(
                name, dst, suh, tmpfiles, replication=replication)
        except RenameException as e:
            self.cleanupRenameState(name, dst, cpath)
            raise e

        # finalize the cursor's state
        cpath = self.finalizeCursor(cpath)
        # remove WAL
        self.removeWAL(name, dst)

        hdfs_bytes = 0
        for f in tmpfiles:
            hdfs_bytes += f.size
        hdfs_bytes /= (1024 * 1024)

        logger_metrics.info(
            "group=export, exportname=\"%s\", sid=%s, child_sid=\"%s\", search_time=%.3f, moved_files=%d, move_time=%.3f, total_size_mb=%d"
            % (name, sid, ','.join(child_sid), sj_et - sj_st, moved_files,
               move_time, hdfs_bytes))

        suh.updateDoneStatus(et, lt)
Example #21
    result = False
    if is_bool:
        rex = "^(?:t|true|1|yes)$"

    if (rex is None and arg in argvals) or (arg in argvals and re.match(rex, argvals[arg])):
        result = True
    return result
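
In other words, arg_on_and_enabled() treats an option as enabled when it is present and, for is_bool=True, matches a truthy literal (this sketch assumes the fragment's signature defaults rex to None):

# arg_on_and_enabled({'debug': 'true'}, 'debug', is_bool=True)  -> True
# arg_on_and_enabled({'debug': 'no'},   'debug', is_bool=True)  -> False
# arg_on_and_enabled({},                'debug', is_bool=True)  -> False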


if __name__ == '__main__':
    logger = setup_logging()
    logger.info('starting..')
    eStart = time.time()
    try:
        results = si.readResults(None, None, False)
        keywords, argvals = si.getKeywordsAndOptions()
        validate_args(keywords, argvals)

        if arg_on_and_enabled(argvals, "debug", is_bool=True):
            logger.setLevel(logging.DEBUG)
            logger.debug("detecting debug argument passed, setting command log_level=DEBUG")

        output_column_name = "mvmath"
        if arg_on_and_enabled(argvals, "labelfield"):
            output_column_name = argvals['labelfield']

        if arg_on_and_enabled(argvals, "prefix"):
            output_column_name = argvals['prefix'] + output_column_name

        for row in results:
            if argvals['field'] in row and argvals['field2'] in row:
Example #22
# send splunk results to slack

import prettytable
import ConfigParser
import requests
import json
import os
import sys
import splunk.Intersplunk as sis
(a, kwargs) = sis.getKeywordsAndOptions()
TRUE_VALUES = ['true', '1', 't', 'y', 'yes']

def get_pretty_table(results):
    if results:
        keys = results[0].keys()
    else:
        return ''
    x = prettytable.PrettyTable(keys, padding_width=4)
    for row in results:
        x.add_row([row[k] for k in keys])
    return "```" + x.get_string() + "```"


def main():
    # get config from config file
    config = ConfigParser.ConfigParser()
    config.readfp(open(os.path.join('..', 'default', 'slack.conf')))

    # username and icon can only be set by conf
    username = config.get('config', 'username')
    icon = config.get('config', 'icon')
Example #23
def jpath():
    try:
        keywords, options = si.getKeywordsAndOptions()
        legacy_args_fixer(options)

        defaultval = options.get('default', None)
        fn_input = options.get('input', options.get('field', '_raw'))
        fn_output = options.get('output', 'jpath')
        if len(keywords) != 1:
            si.generateErrorResults('Requires exactly one path argument.')
            sys.exit(0)
        path = keywords[0]

        # Handle literal (escaped) quotes.  Presumably necessary because of raw args?
        path = path.replace(r'\"', '"')

        if "*" in fn_output:
            apply_output = output_to_wildcard
        else:
            apply_output = output_to_field

        try:
            jp = jmespath.compile(path)
        except ParseError as e:
            # Todo:  Consider stripping off the last line "  ^" pointing to the issue.
            # Not helpful since Splunk wraps the error message in a really ugly way.
            si.generateErrorResults(
                "Invalid JMESPath expression '{}'. {}".format(path, e))
            sys.exit(0)

        results, dummyresults, settings = si.getOrganizedResults()
        # for each results
        for result in results:
            # get field value
            ojson = result.get(fn_input, None)
            added = False
            if ojson is not None:
                if isinstance(ojson, (list, tuple)):
                    # XXX: Add proper support for multivalue input fields.  Just use first value for now
                    ojson = ojson[0]
                try:
                    json_obj = json.loads(ojson)
                except ValueError:
                    # Invalid JSON.  Move on, nothing to see here.
                    continue
                try:
                    values = jp.search(json_obj, options=jp_options)
                    apply_output(values, fn_output, result)
                    result[ERROR_FIELD] = None
                    added = True
                except UnknownFunctionError as e:
                    # Can't detect invalid function names during the compile, but we want to treat
                    # these like syntax errors:  Stop processing immediately
                    si.generateErrorResults(
                        "Issue with JMESPath expression. {}".format(e))
                    sys.exit(0)
                except JMESPathError as e:
                    # Not 100% sure I understand what these errors mean. Should they halt?
                    result[ERROR_FIELD] = "JMESPath error: {}".format(e)
                except Exception as e:
                    result[ERROR_FIELD] = "Exception: {}".format(e)

            if not added and defaultval is not None:
                result[fn_output] = defaultval

        si.outputResults(results)
    except Exception as e:
        import traceback

        stack = traceback.format_exc()
        si.generateErrorResults("Error '%s'. %s" % (e, stack))
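
For reference, a minimal standalone sketch of the jmespath calls this command builds on (the document and path are hypothetical):

import jmespath

jp = jmespath.compile('spec.containers[*].image')
doc = {'spec': {'containers': [{'image': 'nginx:1.25'}, {'image': 'redis:7'}]}}
print(jp.search(doc))  # ['nginx:1.25', 'redis:7']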
Example #24
    # Get configuration values from config.ini
    local_conf = jiracommon.getLocalConf()

    hostname = local_conf.get('jira', 'hostname')
    username = local_conf.get('jira', 'username')
    password = local_conf.get('jira', 'password')
    protocol = local_conf.get('jira', 'soap_protocol')
    port = local_conf.get('jira', 'soap_port')

    url = "%s://%s:%s/rpc/soap/jirasoapservice-v2?wsdl" % (protocol, hostname,
                                                           port)
    logger.info(url)
    client = Client(url)
    auth = client.service.login(username, password)

    keywords, argvals = isp.getKeywordsAndOptions()

    time_option = argvals.get('time', "now")

    logger.info('argv: ' + str(sys.argv))

    if sys.argv[1] == 'filters':
        filters = client.service.getFavouriteFilters(auth)

        keys = (('author', None), ('id', None), ('name', None))

        results = []
        for filter in filters:
            row = jiracommon.flatten(filter, keys)
            logger.info(time.time())
            row['host'] = hostname
Example #25
    ########TEST####################
    kwargs = {}
    for f in ['owner','namespace','sessionKey','hostPath']:
        if f in settings:
            kwargs[f] = settings[f]

    messages = {}
    try:
        maxTerms = int(settings.get("max_terms", MAX_SEARCH_COMPLEXITY))
        if maxTerms > MAX_SEARCH_COMPLEXITY or maxTerms < 1:
            si.addWarnMessage(messages, "max_terms must be between 1 and %s.  Using default." % MAX_SEARCH_COMPLEXITY)
            maxTerms = MAX_SEARCH_COMPLEXITY
    except Exception, e:
        maxTerms = MAX_SEARCH_COMPLEXITY

    dummy,options = si.getKeywordsAndOptions()
    makeORs    = isTrue(options.get("use_disjunct", "t"))
    eventsOnly = isTrue(options.get("eventsonly",   "f"))

    log("MAXTERMS: %s MAKEORS: %s eventsOnly: %s" % (maxTerms, makeORs, eventsOnly))
    log("tsearch: %s" % tsearch)

    results = []
    try:
        results = findTransaction(tname, tsearch, makeORs, eventsOnly, maxTerms, messages, **kwargs)
    except Exception, e:
        error(e)

    events = []
    log("RESULTS: %s" % len(results))
    for result in results:  # api fail
Example #26
def execute():

    results = []
    try:
        results, dummyresults, settings = si.getOrganizedResults()

        keywords, options = si.getKeywordsAndOptions()
        settings.update(options)

        sessionKey = settings.get("sessionKey", None)
        if TESTING and sessionKey == None:
            sessionKey = auth.getSessionKey('admin', 'changeme')
        owner = settings.get("owner", None)
        namespace = settings.get("namespace", "search")
        scriptname = settings.get("script", None)
        prerun_str = settings.get("prerun", "True").lower()
        prerun = prerun_str.startswith('t') or prerun_str.startswith(
            'y') or prerun_str.startswith('1')

        log("sessionKey %s owner %s namespace %s script %s prerun %s" %
            (sessionKey, owner, namespace, scriptname, prerun))

        if scriptname == None:
            raise Exception('"script" value required')
        if ".." in scriptname or "/" in scriptname or "\\" in scriptname:
            raise Exception(
                'pathname cannot contain "..", "/", or "\\".')
        home = si.splunkHome()
        localpath = os.path.join('etc', 'apps', namespace, 'scripts',
                                 scriptname + ".ss")
        pathname = os.path.join(home, localpath)
        if not os.path.exists(pathname):
            raise Exception('script path does not exist: "%s"' %
                            os.path.join("SPLUNK_HOME", localpath))

        log("pathname %s" % (pathname))

        real_stdout = sys.stdout
        if CAN_STREAM_RESULTS_ANY_TIME:
            # output results immediately to stdout
            result_stream = sys.stdout
        else:
            # output results once all done
            result_stream = StringIO.StringIO()

        # capture debugging stdout to StringIO, but have real stdout used for outputting results as streamed
        sys.stdout = StringIO.StringIO()

        script = scripting.Script(sessionKey,
                                  owner,
                                  namespace,
                                  path=pathname,
                                  prerunfix=prerun,
                                  outputstream=result_stream)
        side_effects = script.run()

        log("side_effects %s" % (side_effects))

        # output non-results -- variables and print statements from scripts
        sys.stdout.flush()
        messages = {}
        si.addInfoMessage(messages, "Variable values: %s" % side_effects)
        si.addInfoMessage(messages,
                          "Standard output: %s" % sys.stdout.getvalue())

        # reset stdout
        sys.stdout = real_stdout
        OUTPUT_MSGS = True
        if OUTPUT_MSGS:
            # si.outputResults(None, messages)
            for level, messages in messages.items():
                for msg in messages:
                    print "%s=%s" % (level, normalizeMsg(msg))
            print

        # we haven't output results yet.  do it now.
        if not CAN_STREAM_RESULTS_ANY_TIME:
            result_stream.flush()
            print result_stream.getvalue()

    except Exception, e:
        sys.stdout = real_stdout
        import traceback
        msg = "%s. Traceback: %s" % (e, traceback.format_exc())
        log("error %s" % msg)
        si.generateErrorResults(msg)
Example #27
def main():
    if len(sys.argv) < 3:
        usage()

    tname = sys.argv[1]
    #log("args")
    #for v in sys.argv:
    #    log(v)

    options = ["max_terms", "use_disjunct", "eventsonly"]
    srchargs = []
    log("ARGS: %s" % sys.argv[2:])
    for arg in sys.argv[2:]:
        for option in options:
            if arg.startswith(option):
                break
        else:
            srchargs.append(arg)
    if len(srchargs) == 0:
        usage()

    tsearch = ' '.join(srchargs)
    log("SEARCH: %s" % tsearch)

    results, dummyresults, settings = si.getOrganizedResults()
    results = []  # we don't care about incoming results

    ########TEST#####################
    if 'sessionKey' not in settings:
        settings['owner'] = 'admin'
        settings['password'] = '******'
        settings['namespace'] = 'search'
        settings['sessionKey'] = splunk.auth.getSessionKey('admin', 'changeme')
    ########TEST####################
    kwargs = {}
    for f in ['owner', 'namespace', 'sessionKey', 'hostPath']:
        if f in settings:
            kwargs[f] = settings[f]

    messages = {}
    try:
        maxTerms = int(settings.get("max_terms", MAX_SEARCH_COMPLEXITY))
        if maxTerms > MAX_SEARCH_COMPLEXITY or maxTerms < 1:
            si.addWarnMessage(
                messages,
                "max_terms must be between 1 and %s.  Using default." %
                MAX_SEARCH_COMPLEXITY)
            maxTerms = MAX_SEARCH_COMPLEXITY
    except Exception as e:
        maxTerms = MAX_SEARCH_COMPLEXITY

    dummy, options = si.getKeywordsAndOptions()
    makeORs = isTrue(options.get("use_disjunct", "t"))
    eventsOnly = isTrue(options.get("eventsonly", "f"))

    log("MAXTERMS: %s MAKEORS: %s eventsOnly: %s" %
        (maxTerms, makeORs, eventsOnly))
    log("tsearch: %s" % tsearch)

    results = []
    try:
        results = findTransaction(tname, tsearch, makeORs, eventsOnly,
                                  maxTerms, messages, **kwargs)
    except Exception as e:
        error(e)

    events = []
    log("RESULTS: %s" % len(results))
    for result in results:  # api fail
        event = {}
        for field in result:
            if field == '_time':
                event['_time'] = util.dt2epoch(
                    util.parseISO(str(result['_time'])))
            else:
                event[field] = result[field]
        events.append(event)

    si.outputResults(events, messages)
Example #28
    
    local_conf = jiracommon.getLocalConf()
    
    hostname = local_conf.get('jira', 'hostname')
    port = local_conf.get('jira', 'port')
    proto = local_conf.get('jira', 'protocol')
    baseURL = local_conf.get('jira', 'baseURL')
    username = local_conf.get('jira', 'username')
    password = local_conf.get('jira', 'password')
    
    url = "%s://%s:%s/%s/rpc/soap/jirasoapservice-v2?wsdl" % (proto, hostname, port, baseURL)
    logger.info(url)
    client = Client(url)
    auth = client.service.login(username, password)
    
    keywords, options = isp.getKeywordsAndOptions()
    
    logger.info('keywords: ' + str(keywords))
    logger.info('options: ' + str(options))
    
    if keywords[0] == 'filters':
        filters =  client.service.getFavouriteFilters(auth)

        keys = (('author', None), ('id', None), ('name', None))
        
        results = []
        for jfilter in filters:
            row = jiracommon.flatten(jfilter, keys)
            logger.info(time.time())
            row['_time'] = int(time.time())
            row['_raw'] = row
Example #29
MINUTE = 60
HOUR = 60 * MINUTE
DAY = 24 * HOUR
MONTH = 30 * DAY
YEAR = 12 * MONTH

# handle plurals nicely
def unitval(unit, val):
    plural = ""
    if val >= 2: plural = "s"
    return "%s %s%s ago" % (int(val), unit, plural)
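
For example:

# unitval("minute", 1) -> "1 minute ago"
# unitval("hour", 5)   -> "5 hours ago"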

if __name__ == '__main__':
    try:
        keywords,options = si.getKeywordsAndOptions()
        results,dumb1, dumb2 = si.getOrganizedResults()

        now = time.time()
        # for each result
        for result in results:
            utc = result.get('_time', None)
            if isinstance(utc, list):
                reltime = "unknown"
            elif utc == None:
                reltime = "unknown"
            else:
                diff = int(now - float(utc))
                if diff < -60:
                    reltime = "future"
                elif diff < 0: # handle weird case of client clock off slightly
Example #30
def execute():
    results = []
    try:
        results, dummyresults, settings = si.getOrganizedResults()

        # default values
        args = {'namespace': 'search'}
        # get commandline args
        keywords, options = si.getKeywordsAndOptions()
        # override default args with settings from search kernel
        args.update(settings)
        # override default args with commandline args
        args.update(options)

        sessionKey = args.get("sessionKey", None)
        owner = args.get("owner", 'admin')
        namespace = args.get("namespace", None)

        if namespace.lower() == "none":
            namespace = None

        messages = {}

        if sessionKey == None:
            # this shouldn't happen, but it's useful for testing.
            try:
                sessionKey = sa.getSessionKey('admin', 'changeme')
                si.addWarnMessage(
                    messages,
                    "No session given to 'tune' command. Using default admin account and password."
                )
            except splunk.AuthenticationFailed, e:
                si.addErrorMessage(messages,
                                   "No session given to 'tune' command.")
                return

        if len(keywords) != 1:
            usage()

        # e.g., '/data/inputs/monitor'
        entity = keywords[0]
        logger.info("Entity: %s Args: %s" % (entity, args))

        results = []  # we don't care about incoming results
        try:
            entitys = en.getEntities(entity,
                                     sessionKey=sessionKey,
                                     owner=owner,
                                     namespace=namespace,
                                     count=-1)
            for name, entity in entitys.items():
                try:
                    myapp = entity["eai:acl"]["app"]
                    if namespace != None and myapp != namespace:
                        continue
                except:
                    continue  # if no eai:acl/app, filter out
                result = entityToResult(name, entity)
                results.append(result)
        except splunk.ResourceNotFound, e2:
            pass