Example #1
    def batchControl(self, ctl, sid=None, action=None, ttl=None, **kw):

        resp = JsonResponse()

        if sid is None or action is None or action not in JOB_ACTIONS:
            cherrypy.response.status = 400
            resp.success = False
            resp.addError(_("You must provide a job id(s) and a valid action."))
            return self.render_json(resp)

        if not isinstance(sid, list):
            sid = [sid]

        resp.data = []
        action = action.lower()

        for searchId in sid:
            try:
                job = splunk.search.getJob(searchId, sessionKey=cherrypy.session["sessionKey"])
                if action == "ttl":
                    response = job.setTTL(ttl)
                else:
                    actionMethod = getattr(job, action)
                    response = actionMethod()
            except splunk.ResourceNotFound as e:
                resp.addError(_("Splunk could not find a job with a job id of %s.") % searchId, sid=searchId)
                response = False
            resp.data.append({"sid": searchId, "action": action, "response": response})
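Every handler in these examples funnels its output through a JsonResponse envelope with success and data attributes plus addError/addFatal/addMessage methods. The real class ships with the Splunk appserver and is not shown on this page; the sketch below is only a minimal stand-in whose attribute and method names are inferred from the calls above, so treat the details (especially the message structure) as assumptions:

    class JsonResponse(object):
        """Minimal stand-in mirroring the envelope interface used in these examples."""

        def __init__(self):
            self.success = True   # handlers flip this to False on failure
            self.data = None      # payload: a job id, a job listing, ...
            self.messages = []    # accumulated user-facing messages

        def addMessage(self, level, text, **kw):
            entry = {'type': level, 'text': text}
            entry.update(kw)      # extra context, e.g. sid=searchId above
            self.messages.append(entry)

        def addError(self, text, **kw):
            self.addMessage('ERROR', text, **kw)

        def addFatal(self, text, **kw):
            self.success = False  # fatal errors also mark the response failed
            self.addMessage('FATAL', text, **kw)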
Example #2
    def listJobs(self,
                 restrictToSession=True,
                 nocache=False,
                 s=None,
                 cachebuster=None,
                 wait=True):
        '''
        Returns a listing of jobs that the client needs to be aware of;
        the listing is restricted by the user session and optionally
        filtered by a whitelist provided by the client
        '''

        resp = JsonResponse()

        # bail out if no jobs are specified
        if not s:
            resp.data = []
            return self.render_json(resp)

        if 0:  # disabled debug path: proxy the raw splunkd job listing
            uri = en.buildEndpoint('search/jobs', '')
            logger.error("uri: %s" % uri)
            serverResponse, serverContent = rest.simpleRequest(
                uri, getargs={
                    'id': s,
                    'output_mode': 'json'
                })

            return serverContent

        # normalize a single string into a list
        if isinstance(s, basestring): s = [s]

        # bypass the legacy sdk blocking for RUNNING state
        wait = splunk.util.normalizeBoolean(wait)

        # loop over all requested jobs and ask the server for status
        listing = []
        for requestSID in s:
            try:
                job = splunk.search.getJob(requestSID, waitForRunning=wait)
                listing.append(job.toJsonable())

            except splunk.ResourceNotFound:
                listing.append({'sid': requestSID, '__notfound__': True})
                nocache = True  # always bust the cache; otherwise repeat requests may never learn that the job doesn't exist
                resp.addError(
                    _('Splunk could not find a job with sid=%s.') % requestSID)

            except Exception as e:
                logger.exception(e)
                resp.success = False
                resp.addError(str(e))
                return self.render_json(resp)
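listJobs (and the dispatchJob variants below) pass query-string values such as "true", "1" or "false" through splunk.util.normalizeBoolean before using them as flags. Here is a rough sketch of what such a helper does; the actual Splunk implementation and its exact signature are not shown on this page, so the enableStrictMode parameter and the accepted spellings are assumptions:

    def normalizeBoolean(value, enableStrictMode=False):
        """Coerce common string spellings of true/false into a real bool."""
        if isinstance(value, bool):
            return value
        s = str(value).strip().lower()
        if s in ('1', 't', 'true', 'yes', 'y', 'on'):
            return True
        if s in ('0', 'f', 'false', 'no', 'n', 'off'):
            return False
        if enableStrictMode:
            raise ValueError('Unable to cast value to a boolean: %r' % (value,))
        return value  # unrecognized values pass through unchanged

    print(normalizeBoolean('true'))   # True
    print(normalizeBoolean('0'))      # False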
Example #3
    def dispatchJob(self, wait=True, **kwargs):
        '''
        Dispatches a new job
        '''
        if not set(kwargs.keys()) >= set(REQUIRED_DISPATCH_ARGUMENTS):
            raise cherrypy.HTTPError(
                status=400,
                message="Missing one or more of the required arguments: "
                        "'search', 'statusBucketCount', 'earliestTime', 'latestTime'.")

        # setup the dispatch args
        options = kwargs.copy()
        q = options['search']
        del options['search']

        if 'maxEvents' not in options:
            options['maxEvents'] = EXPORT_HARDLIMIT

        # ensure that owner and namespace contexts are passed
        if 'owner' not in options:
            options['owner'] = cherrypy.session['user'].get('name')
        if 'namespace' not in options:
            options['namespace'] = splunk.getDefault('namespace')
            logger.warn(
                'search was dispatched without a namespace specified; defaulting to "%s"'
                % options['namespace'])

        # Add the default time format
        options['time_format'] = cherrypy.config.get('DISPATCH_TIME_FORMAT')

        # bypass the legacy sdk blocking for RUNNING state
        wait = splunk.util.normalizeBoolean(wait)
        options["waitForRunning"] = wait

        resp = JsonResponse()

        try:
            logger.debug('q=%s' % q)
            logger.debug('options=%s' % options)

            # We're done reading/writing the user's session at this point; if that changes, re-acquire the lock.
            # This can take significant time when there is a subsearch
            cherrypy.session.release_lock()

            job = splunk.search.dispatch(
                q, sessionKey=cherrypy.session['sessionKey'], **options)
            resp.data = job.id
        except splunk.SplunkdConnectionException as e:
            logger.exception(e)
            resp.success = False
            resp.addFatal(str(e))
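The guard at the top of dispatchJob relies on set comparison: set(kwargs.keys()) >= set(REQUIRED_DISPATCH_ARGUMENTS) is the superset test, True only when every required key is present. The same check in isolation (the list contents are taken from the error message above):

    REQUIRED_DISPATCH_ARGUMENTS = ['search', 'statusBucketCount',
                                   'earliestTime', 'latestTime']

    def has_required_args(kwargs):
        # >= on sets is the superset test: all required keys must appear
        return set(kwargs.keys()) >= set(REQUIRED_DISPATCH_ARGUMENTS)

    print(has_required_args({'search': '*', 'statusBucketCount': 300,
                             'earliestTime': '-1h', 'latestTime': 'now'}))  # True
    print(has_required_args({'search': '*'}))                               # False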
Example #4
    def uiindex(self, **kwargs):
        '''
        Returns a JsonResponse envelope of message data.
        '''
        resp = JsonResponse()

        try:
            msg = MsgPoolMgr.get_poolmgr_instance()[UI_MSG_POOL].pop(kwargs['id'])

            if msg:
                resp.addMessage(msg.severity.upper(), msg.text)

        except splunk.SplunkdConnectionException as e:
            logger.exception(e)
            resp.success = False
            resp.addFatal(_('The appserver was unable to connect to splunkd. Check if splunkd is still running. (%s)') % e.message)
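uiindex pops a message out of a shared pool keyed by id, so each message is delivered at most once. MsgPoolMgr's real storage and locking are not shown on this page; the toy pool below only assumes the pop-once semantics the handler depends on (a second pop for the same id returns None, which the "if msg:" check above tolerates):

    class MessagePool(object):
        """Deliver-once store: pop() removes the message as it is read."""

        def __init__(self):
            self._messages = {}

        def push(self, msg_id, msg):
            self._messages[msg_id] = msg

        def pop(self, msg_id):
            # missing or already-delivered ids yield None instead of raising
            return self._messages.pop(msg_id, None)

    pool = MessagePool()
    pool.push('42', 'restart required')
    print(pool.pop('42'))  # 'restart required'
    print(pool.pop('42'))  # None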
Example #5
    def dispatchJob(self, **kwargs):
        """
        Dispatches a new job
        """
        if not set(kwargs.keys()) >= set(REQUIRED_DISPATCH_ARGUMENTS):
            raise cherrypy.HTTPError(
                status=400,
                message="Missing one or more of the required arguments: 'search', 'statusBucketCount', 'earliestTime', 'latestTime'.",
            )

        # setup the dispatch args
        options = kwargs.copy()
        q = options["search"]
        del options["search"]

        if "maxEvents" not in options:
            options["maxEvents"] = EXPORT_HARDLIMIT

        # ensure that owner and namespace contexts are passed
        if "owner" not in options:
            options["owner"] = cherrypy.session["user"].get("name")
        if "namespace" not in options:
            options["namespace"] = splunk.getDefault("namespace")
            logger.warn(
                'search was dispatched without a namespace specified; defaulting to "%s"' % options["namespace"]
            )

        # Add the default time format
        options["time_format"] = cherrypy.config.get("DISPATCH_TIME_FORMAT")

        resp = JsonResponse()

        try:
            try:
                userid = cherrypy.session["user"]["name"]
                querylogger.warn("%s\t%s" % (userid, q))
            except Exception:
                pass  # query audit logging must never break dispatch

            logger.debug("q=%s" % q)
            logger.debug("options=%s" % options)
            job = splunk.search.dispatch(q, sessionKey=cherrypy.session["sessionKey"], **options)
            resp.data = job.id
        except splunk.SplunkdConnectionException as e:
            logger.exception(e)
            resp.success = False
            resp.addFatal(str(e))
Example #6
    def dispatchJob(self, wait=True, **kwargs):
        '''
        Dispatches a new job
        '''
        if not set(kwargs.keys()) >= set(REQUIRED_DISPATCH_ARGUMENTS):
            raise cherrypy.HTTPError(status=400, message="Missing one or more of the required arguments: 'search', 'statusBucketCount', 'earliestTime', 'latestTime'.")

        # setup the dispatch args
        options = kwargs.copy()
        q = options['search']
        del options['search']

        if 'maxEvents' not in options:
            options['maxEvents'] = EXPORT_HARDLIMIT
        
        # ensure that owner and namespace contexts are passed
        if 'owner' not in options:
            options['owner'] = cherrypy.session['user'].get('name')
        if 'namespace' not in options:
            options['namespace'] = splunk.getDefault('namespace')
            logger.warn('search was dispatched without a namespace specified; defaulting to "%s"' % options['namespace'])

        # Add the default time format
        options['time_format'] = cherrypy.config.get('DISPATCH_TIME_FORMAT')
        
        # bypass the legacy sdk blocking for RUNNING state
        wait = splunk.util.normalizeBoolean(wait)
        options["waitForRunning"] = wait
     
        resp = JsonResponse()

        try:
            logger.debug('q=%s' % q)
            logger.debug('options=%s' % options)

            # We're done reading/writing the user's session at this point; if that changes, re-acquire the lock.
            # This can take significant time when there is a subsearch
            cherrypy.session.release_lock()

            job = splunk.search.dispatch(q, sessionKey=cherrypy.session['sessionKey'], **options)
            resp.data = job.id
        except splunk.SplunkdConnectionException as e:
            logger.exception(e)
            resp.success = False
            resp.addFatal(str(e))
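The functional differences between Example #5 and Example #6 are the waitForRunning flag and the cherrypy.session.release_lock() call. CherryPy serializes requests that share a session for as long as the lock is held, so releasing it before a potentially slow dispatch (e.g. one with a subsearch) keeps other requests from the same browser responsive. A sketch of the pattern; the handler and the do_expensive_work helper are illustrative, not from the original:

    import cherrypy

    def do_expensive_work(user):                 # stand-in for a slow dispatch
        return 'done for %s' % user

    class Api(object):
        @cherrypy.expose
        def slow(self):
            user = cherrypy.session.get('user')  # finish all session reads first
            cherrypy.session.release_lock()      # unblock other same-session requests
            return do_expensive_work(user)       # no session access past this point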
Example #7
    def listJobs(self, restrictToSession=True, nocache=False, s=None, cachebuster=None):
        """
        Returns a listing of jobs that the client needs to be aware of;
        the listing is restricted by the user session and optionally
        filtered by a whitelist provided by the client
        """

        resp = JsonResponse()

        # bail out if no jobs are specified
        if not s:
            resp.data = []
            return self.render_json(resp)

        if 0:  # disabled debug path: proxy the raw splunkd job listing
            uri = en.buildEndpoint("search/jobs", "")
            logger.error("uri: %s" % uri)
            serverResponse, serverContent = rest.simpleRequest(uri, getargs={"id": s, "output_mode": "json"})

            return serverContent

        # normalize a single string into a list
        if isinstance(s, basestring):
            s = [s]

        # loop over all requested jobs and ask the server for status
        listing = []
        for requestSID in s:
            try:
                job = splunk.search.getJob(requestSID)
                listing.append(job.toJsonable())

            except splunk.ResourceNotFound:
                listing.append({"sid": requestSID, "__notfound__": True})
                nocache = True  # always bust the cache; otherwise repeat requests may never learn that the job doesn't exist
                resp.addError(_("Splunk could not find a job with sid=%s.") % requestSID)

            except Exception as e:
                logger.exception(e)
                resp.success = False
                resp.addError(str(e))
                return self.render_json(resp)
Example #8
    def listJobs(self, restrictToSession=True, nocache=False, s=None, cachebuster=None, wait=True):
        '''
        Returns a listing of jobs that the client needs to be aware of;
        the listing is restricted by the user session and optionally
        filtered by a whitelist provided by the client
        '''

        resp = JsonResponse()
        
        # bail out if no jobs are specified
        if not s:
            resp.data = []
            return self.render_json(resp)

        if 0:  # disabled debug path: proxy the raw splunkd job listing
            uri = en.buildEndpoint('search/jobs', '')
            logger.error("uri: %s" % uri)
            serverResponse, serverContent = rest.simpleRequest(uri, getargs={'id':s, 'output_mode':'json'})
            
            return serverContent
            
        # normalize a single string into a list
        if isinstance(s, basestring): s = [s]
        
        # bypass the legacy sdk blocking for RUNNING state
        wait = splunk.util.normalizeBoolean(wait)
        
        # loop over all requested jobs and ask the server for status
        listing = []
        for requestSID in s:
            try:
                job = splunk.search.getJob(requestSID, waitForRunning=wait)
                listing.append(job.toJsonable())
                
            except splunk.ResourceNotFound:
                listing.append({'sid': requestSID, '__notfound__': True})
                nocache = True # always bust the cache; otherwise repeat requests may never learn that the job doesn't exist
                resp.addError(_('Splunk could not find a job with sid=%s.') % requestSID)
                
            except Exception as e:
                logger.exception(e)
                resp.success = False
                resp.addError(str(e))
                return self.render_json(resp)
Example #9
    def uiindex(self, **kwargs):
        '''
        Returns a JsonResponse envelope of message data.
        '''
        resp = JsonResponse()

        try:
            msg = MsgPoolMgr.get_poolmgr_instance()[UI_MSG_POOL].pop(
                kwargs['id'])

            if msg:
                resp.addMessage(msg.severity.upper(), msg.text)

        except splunk.SplunkdConnectionException as e:
            logger.exception(e)
            resp.success = False
            resp.addFatal(
                _('The appserver was unable to connect to splunkd. '
                  'Check if splunkd is still running. (%s)') % e.message)
Example #10
    def batchControl(self, ctl, sid=None, action=None, ttl=None, **kw):

        resp = JsonResponse()

        if sid is None or action is None or action not in JOB_ACTIONS:
            cherrypy.response.status = 400
            resp.success = False
            resp.addError(
                _('You must provide one or more job ids and a valid action.'))
            return self.render_json(resp)

        if not isinstance(sid, list):
            sid = [sid]

        resp.data = []
        action = action.lower()

        for searchId in sid:
            try:
                job = splunk.search.getJob(
                    searchId, sessionKey=cherrypy.session['sessionKey'])
                if action == 'ttl':
                    response = job.setTTL(ttl)
                else:
                    actionMethod = getattr(job, action)
                    response = actionMethod()
            except splunk.ResourceNotFound as e:
                resp.addError(
                    _('Splunk could not find a job with a job id of %s.') %
                    searchId,
                    sid=searchId)
                response = False
            resp.data.append({
                'sid': searchId,
                'action': action,
                'response': response
            })
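batchControl maps the action name straight onto a job method with getattr, which is why validating action against the JOB_ACTIONS whitelist first matters: without it a caller could invoke any attribute of the job object. The dispatch pattern in isolation (the whitelist contents and the Job stub are assumptions for the demo):

    JOB_ACTIONS = ('cancel', 'pause', 'unpause', 'finalize', 'ttl')

    class Job(object):
        def cancel(self):
            return True

    def control(job, action):
        if action not in JOB_ACTIONS:
            raise ValueError('invalid action: %s' % action)
        return getattr(job, action)()  # look the method up by name, then call it

    print(control(Job(), 'cancel'))    # True
    # control(Job(), '__class__') would raise ValueError: the whitelist blocks it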
Example #11
    def getJobAsset(self, sid, asset, **kwargs):
        """
        Returns specific asset for a given job
        """

        job_lite = splunk.search.JobLite(sid)

        # set response type; default to XML output
        if "outputMode" not in kwargs:
            kwargs["outputMode"] = "xml"

        outputMode = kwargs["outputMode"]
        if outputMode == "json":
            ct = MIME_JSON
        elif outputMode == "raw":
            ct = MIME_TEXT
        elif outputMode == "csv":
            ct = MIME_CSV
        else:
            outputMode = "xml"
            ct = MIME_XML

        cherrypy.response.headers["content-type"] = ct

        # if we're exporting, set the correct headers to get the browser to show a download
        # dialog; also hard-limit the export cap to 10,000 events.
        if "isDownload" in kwargs:
            if outputMode == "raw":
                extension = "txt"
            else:
                extension = outputMode

            if "filename" in kwargs and len(kwargs["filename"]) > 0:
                if kwargs["filename"].find(".") > -1:
                    filename = kwargs["filename"]
                else:
                    filename = "%s.%s" % (kwargs["filename"], extension)
            else:
                filename = "%s.%s" % (sid.replace(".", "_"), extension)

            # sanitize filenames
            clean_filename = re.split(r'[\r\n;"\']+', filename)[0]
            clean_filename = clean_filename[:255]

            cherrypy.response.headers["content-type"] = "application/force-download"
            cherrypy.response.headers["content-disposition"] = 'attachment; filename="%s"' % clean_filename

            rs = job_lite.getResults("results_preview", 0, 1)

            # by default, exclude underscore fields except time and raw
            if "field_list" not in kwargs:
                if not rs:
                    resp = JsonResponse()
                    cherrypy.response.status = 404
                    resp.success = False
                    resp.addError("job sid=%s not found" % sid)
                    return self.render_json(resp)

                kwargs["field_list"] = [
                    x for x in rs.fieldOrder() if (not x.startswith("_") or x == "_time" or x == "_raw")
                ]

            job = splunk.search.getJob(sid)
            return self.streamJobExport(job, asset, **kwargs)

        # set default time format
        if "time_format" not in kwargs and "timeFormat" not in kwargs:
            kwargs["time_format"] = cherrypy.config.get("DISPATCH_TIME_FORMAT")

        # SPL-34380, if the url will be too long, remove the field_list value.
        # This is just a bandaid for now, a better solution involves splunkd
        # patching.
        url_len = len(urllib.urlencode(kwargs))
        if url_len > 8192:  # Max url length
            logger.warn("field_list argument removed in REST call to shorten URL")
            kwargs.pop("field_list", None)
            kwargs.pop("f", None)

        # pass through the search options
        job_lite.setFetchOption(**kwargs)

        try:
            output = job_lite.get(asset)
        except Exception:
            resp = JsonResponse()
            cherrypy.response.status = 404
            resp.success = False
            resp.addError("job sid=%s not found" % sid)
            return self.render_json(resp)

        # TODO:
        # handle server-side XSL transforms
        moduleName = cherrypy.request.headers.get("X-Splunk-Module", None)
        if "moduleName" in kwargs:
            moduleName = kwargs.get("moduleName")

        if moduleName and ("xsl" in kwargs) and output:

            # logger.debug('search api got xsl request: %s' % moduleName)

            # get XSL file
            xslFilePath = os.path.join(self.moduleRoster[moduleName]["path"], kwargs["xsl"])
            f = open(xslFilePath, "r")
            xslt_doc = et.parse(f)
            f.close()

            # generate transformer
            transform = et.XSLT(xslt_doc)

            # transform the XML
            xmlDoc = et.fromstring(output)
            transformedOutput = transform(xmlDoc)

            cherrypy.response.headers["content-type"] = MIME_HTML

            html = et.tostring(transformedOutput)
            if not html:
                output = "Loading..."
            else:
                output = html

        # This handles the edge case when output returns no results but
        # a content-type of html is still expected, say by jQuery's $.ajax
        # method.  This could be avoided if the response returned a valid
        # xml document while maintaining a content-type of xml.  Currently
        # empty results are rendered as content-length 0 which jQuery fails
        # on parsing, as it expects xml.
        elif moduleName and ("xsl" in kwargs) and not output:
            logger.debug(
                "Search api got xsl request, but no search results "
                + "were returned. Setting content-type to html anyway"
            )
            cherrypy.response.headers["content-type"] = MIME_HTML

            output = "Loading..."

        # otherwise, return raw contents
        if util.apply_etag(output):
            return None
        else:
            return output
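The download branch builds a Content-Disposition header from user-supplied input, so the filename is split on characters that could terminate or smuggle header values (quotes, semicolons, CR/LF) and capped at 255 characters, a common filesystem limit. The same sanitizer, runnable on its own:

    import re

    def clean_download_filename(filename):
        # keep only the part before any quote, semicolon or line break
        clean = re.split(r'[\r\n;"\']+', filename)[0]
        return clean[:255]

    print(clean_download_filename('report.csv'))                     # report.csv
    print(clean_download_filename('evil";\r\nSet-Cookie: x=1.csv'))  # evil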
Example #12
    def getJobAsset(self, sid, asset, compat_mode=True, **kwargs):
        '''
        Returns specific asset for a given job

        compat_mode: when enabled, JSON output for the results, events and
        results_preview asset types is transformed to the 4.x variant.
        '''
        
        compat_mode = splunk.util.normalizeBoolean(compat_mode)
         
        job_lite = splunk.search.JobLite(sid)

        # set response type; default to XML output
        if 'outputMode' not in kwargs:
            kwargs['outputMode'] = 'xml'

        outputMode = kwargs['outputMode']
        if outputMode == 'json': ct = splunk.appserver.mrsparkle.MIME_JSON
        elif outputMode == 'raw': ct = MIME_TEXT
        elif outputMode == 'csv': ct = MIME_CSV
        else: 
            outputMode = 'xml'
            ct = MIME_XML

        cherrypy.response.headers['content-type'] = ct
        
        # if we're exporting, set the correct headers to get the browser to show a download
        # dialog; also hard-limit the export cap to 10,000 events.
        if 'isDownload' in kwargs:
            if outputMode == 'raw':
                extension = 'txt'
            else:
                extension = outputMode

            if 'filename' in kwargs and len(kwargs["filename"]) > 0:
                if kwargs['filename'].find('.') > -1:
                    filename = kwargs['filename']
                else:
                    filename = "%s.%s" % ( kwargs['filename'], extension)
            else:
                filename = "%s.%s" % ( sid.replace('.','_'), extension)

            # sanitize filenames
            clean_filename = re.split(r'[\r\n;"\']+', filename.encode("utf-8"))[0]
            clean_filename = clean_filename[:255]
            clean_filename = clean_filename.replace(' ', '_')

            cherrypy.response.headers['content-type'] = 'application/force-download'  
            cherrypy.response.headers['content-disposition'] = 'attachment; filename="%s"' % clean_filename

            rs = job_lite.getResults('results_preview', 0, 1)
            
            # by default, exclude underscore fields except time and raw
            if 'field_list' not in kwargs:
                if not rs:
                    resp = JsonResponse()
                    cherrypy.response.status = 404
                    resp.success = False
                    resp.addError("job sid=%s not found" % sid)
                    return self.render_json(resp)

                kwargs['field_list'] = [x for x in rs.fieldOrder() if (not x.startswith('_') or x == '_time' or x == '_raw')]

            job = splunk.search.getJob(sid)
            return self.streamJobExport(job, asset, **kwargs)

        # set default time format
        if 'time_format' not in kwargs and 'timeFormat' not in kwargs:
            kwargs['time_format'] = cherrypy.config.get('DISPATCH_TIME_FORMAT')

        # SPL-34380, if the url will be too long, remove the field_list value.
        # This is just a bandaid for now, a better solution involves splunkd 
        # patching.
        url_len = len(urllib.urlencode(kwargs))
        if url_len > 8192: # Max url length
            logger.warn('field_list argument removed in REST call to shorten URL')
            kwargs.pop('field_list', None)
            kwargs.pop('f', None)
            
        # pass through the search options
        job_lite.setFetchOption(**kwargs)

        try:
            output = job_lite.get(asset)
        except Exception:
            resp = JsonResponse()
            cherrypy.response.status = 404
            resp.success = False
            resp.addError("job sid=%s not found" % sid)
            return self.render_json(resp)
                

        # TODO:
        # handle server-side XSL transforms
        moduleName = cherrypy.request.headers.get('X-Splunk-Module', None)
        if 'moduleName' in kwargs:
            moduleName = kwargs.get('moduleName')
            
        if outputMode == 'json' and output and compat_mode and asset in ['results_preview', 'results', 'events']:
            # transform json to pre-5.0 format for backwards compliance
            try:
                data = json.loads(output)
            except ValueError:
                pass  # not valid JSON; leave the payload untouched
            else:
                output = json.dumps(data.get('results', []))
        
        elif moduleName and ('xsl' in kwargs) and output:
            
            #logger.debug('search api got xsl request: %s' % moduleName)
            
            # get XSL file
            xslFilePath = os.path.abspath(os.path.expandvars(os.path.join(self.moduleRoster[moduleName]['path'], kwargs['xsl'])))
            splunkHomePath = os.path.expandvars('$SPLUNK_HOME')
            
            if xslFilePath.startswith(splunkHomePath):
                try:
                    f = open(xslFilePath, 'r')
                    xslt_doc = et.parse(f)
                    f.close()
                
                    # generate transformer
                    transform = et.XSLT(xslt_doc)
                
                    # transform the XML
                    xmlDoc = et.fromstring(output)
                    transformedOutput = transform(xmlDoc)
                
                    cherrypy.response.headers['content-type'] = MIME_HTML
    
                    html = et.tostring(transformedOutput)
                    if not html:
                        output = 'Loading...'
                    else:
                        output = html
                except Exception as e:
                    cherrypy.response.headers['content-type'] = MIME_HTML
                    logger.warn('Exception occurred while transforming xml results: %s' % e)
                    output = 'An error occurred while performing the XSLT transform on the results. Please check the logs for errors.'

            else:
                cherrypy.response.headers['content-type'] = MIME_HTML
                logger.warn('File xsl="%s" is outside of $SPLUNK_HOME="%s"' % (xslFilePath, splunkHomePath))
                output = 'The file you are trying to access is not under the $SPLUNK_HOME directory'
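compat_mode exists because Splunk 5.0 changed the JSON shape for job results: the 4.x API returned a bare array of result rows, while 5.0 wraps the rows in an envelope whose results key holds the array (the envelope is simplified here to the one field the code actually reads). A standalone illustration of the transform:

    import json

    five_oh = json.dumps({'preview': False,
                          'results': [{'host': 'web-01', 'count': '7'}]})

    def to_4x_compat(output):
        """Unwrap the 5.0 envelope back to the bare 4.x results array."""
        try:
            data = json.loads(output)
        except ValueError:  # not JSON: pass the payload through untouched
            return output
        return json.dumps(data.get('results', []))

    print(to_4x_compat(five_oh))  # [{"host": "web-01", "count": "7"}]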
Example #13
    def getJobAsset(self, sid, asset, compat_mode=True, **kwargs):
        '''
        Returns specific asset for a given job

        compat_mode: when enabled, JSON output for the results, events and
        results_preview asset types is transformed to the 4.x variant.
        '''

        compat_mode = splunk.util.normalizeBoolean(compat_mode)

        job_lite = splunk.search.JobLite(sid)

        # set response type; default to XML output
        if 'outputMode' not in kwargs:
            kwargs['outputMode'] = 'xml'

        outputMode = kwargs['outputMode']
        if outputMode == 'json': ct = splunk.appserver.mrsparkle.MIME_JSON
        elif outputMode == 'raw': ct = MIME_TEXT
        elif outputMode == 'csv': ct = MIME_CSV
        else:
            outputMode = 'xml'
            ct = MIME_XML

        cherrypy.response.headers['content-type'] = ct

        # if we're exporting, set the correct headers to get the browser to show a download
        # dialog; also hard-limit the export cap to 10,000 events.
        if 'isDownload' in kwargs:
            if outputMode == 'raw':
                extension = 'txt'
            else:
                extension = outputMode

            if 'filename' in kwargs and len(kwargs["filename"]) > 0:
                if kwargs['filename'].find('.') > -1:
                    filename = kwargs['filename']
                else:
                    filename = "%s.%s" % (kwargs['filename'], extension)
            else:
                filename = "%s.%s" % (sid.replace('.', '_'), extension)

            # sanitize filenames
            clean_filename = re.split(r'[\r\n;"\']+', filename.encode("utf-8"))[0]
            clean_filename = clean_filename[:255]
            clean_filename = clean_filename.replace(' ', '_')

            cherrypy.response.headers['content-type'] = 'application/force-download'
            cherrypy.response.headers['content-disposition'] = 'attachment; filename="%s"' % clean_filename

            rs = job_lite.getResults('results_preview', 0, 1)

            # by default, exclude underscore fields except time and raw
            if 'field_list' not in kwargs:
                if not rs:
                    resp = JsonResponse()
                    cherrypy.response.status = 404
                    resp.success = False
                    resp.addError("job sid=%s not found" % sid)
                    return self.render_json(resp)

                kwargs['field_list'] = [
                    x for x in rs.fieldOrder()
                    if (not x.startswith('_') or x == '_time' or x == '_raw')
                ]

            job = splunk.search.getJob(sid)
            return self.streamJobExport(job, asset, **kwargs)

        # set default time format
        if 'time_format' not in kwargs and 'timeFormat' not in kwargs:
            kwargs['time_format'] = cherrypy.config.get('DISPATCH_TIME_FORMAT')

        # SPL-34380, if the url will be too long, remove the field_list value.
        # This is just a bandaid for now, a better solution involves splunkd
        # patching.
        url_len = len(urllib.urlencode(kwargs))
        if url_len > 8192:  # Max url length
            logger.warn(
                'field_list argument removed in REST call to shorten URL')
            kwargs.pop('field_list', None)
            kwargs.pop('f', None)

        # pass through the search options
        job_lite.setFetchOption(**kwargs)

        try:
            output = job_lite.get(asset)
        except Exception:
            resp = JsonResponse()
            cherrypy.response.status = 404
            resp.success = False
            resp.addError("job sid=%s not found" % sid)
            return self.render_json(resp)

        # TODO:
        # handle server-side XSL transforms
        moduleName = cherrypy.request.headers.get('X-Splunk-Module', None)
        if 'moduleName' in kwargs:
            moduleName = kwargs.get('moduleName')

        if (outputMode == 'json' and output and compat_mode
                and asset in ('results_preview', 'results', 'events')):
            # transform json to pre-5.0 format for backwards compliance
            try:
                data = json.loads(output)
            except ValueError:
                pass  # not valid JSON; leave the payload untouched
            else:
                output = json.dumps(data.get('results', []))

        elif moduleName and ('xsl' in kwargs) and output:

            #logger.debug('search api got xsl request: %s' % moduleName)

            # get XSL file
            xslFilePath = os.path.abspath(
                os.path.expandvars(
                    os.path.join(self.moduleRoster[moduleName]['path'],
                                 kwargs['xsl'])))
            splunkHomePath = os.path.expandvars('$SPLUNK_HOME')

            if xslFilePath.startswith(splunkHomePath):
                try:
                    f = open(xslFilePath, 'r')
                    xslt_doc = et.parse(f)
                    f.close()

                    # generate transformer
                    transform = et.XSLT(xslt_doc)

                    # transform the XML
                    xmlDoc = et.fromstring(output)
                    transformedOutput = transform(xmlDoc)

                    cherrypy.response.headers['content-type'] = MIME_HTML

                    html = et.tostring(transformedOutput)
                    if not html:
                        output = 'Loading...'
                    else:
                        output = html
                except Exception as e:
                    cherrypy.response.headers['content-type'] = MIME_HTML
                    logger.warn('Exception occurred while transforming xml results: %s' % e)
                    output = 'An error occurred while performing the XSLT transform on the results. Please check the logs for errors.'

            else:
                cherrypy.response.headers['content-type'] = MIME_HTML
                logger.warn('File xsl="%s" is outside of $SPLUNK_HOME="%s"' %
                            (xslFilePath, splunkHomePath))
                output = 'The file you are trying to access is not under the $SPLUNK_HOME directory'
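Examples #12 and #13 guard the XSL path with a startswith($SPLUNK_HOME) check after expandvars/abspath, which blocks plain ../ traversal but can still be fooled by a symlink inside the tree or by a sibling directory whose name merely begins with the same prefix (e.g. /opt/splunk2 when $SPLUNK_HOME is /opt/splunk). A stricter containment check, offered as a sketch rather than a drop-in replacement:

    import os

    def is_under(path, root):
        """True only if the resolved path lies inside the resolved root."""
        path = os.path.realpath(path)  # resolves symlinks and '..'
        root = os.path.realpath(root)
        # compare whole components so '/opt/splunk2' can't pass for '/opt/splunk'
        return path == root or path.startswith(root + os.sep)

    print(is_under('/opt/splunk/etc/app.xsl', '/opt/splunk'))       # True
    print(is_under('/opt/splunk2/evil.xsl', '/opt/splunk'))         # False
    print(is_under('/opt/splunk/../../etc/passwd', '/opt/splunk'))  # False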