def batchControl(self, ctl, sid=None, action=None, ttl=None, **kw):
    """
    Apply a control action to one or more search jobs and report the
    per-job outcome.

    sid    -- a single job id or a list of job ids (required)
    action -- one of JOB_ACTIONS, or 'ttl' to update the job lifetime
    ttl    -- new time-to-live, only used when action == 'ttl'

    Returns a JSON payload with one {sid, action, response} entry per job.
    Responds 400 when sid/action are missing or the action is not allowed.
    """
    resp = JsonResponse()

    # both a job id and a whitelisted action are required
    if sid is None or action is None or action not in JOB_ACTIONS:
        cherrypy.response.status = 400
        resp.success = False
        resp.addError(_("You must provide a job id(s) and a valid action."))
        return self.render_json(resp)

    # normalize a single sid into a list so one loop handles both forms
    if not isinstance(sid, list):
        sid = [sid]

    resp.data = []
    action = action.lower()

    for searchId in sid:
        try:
            job = splunk.search.getJob(
                searchId, sessionKey=cherrypy.session["sessionKey"])
            if action == "ttl":
                response = job.setTTL(ttl)
            else:
                # the JOB_ACTIONS whitelist guarantees this resolves to a
                # control method on the job object
                actionMethod = getattr(job, action)
                response = actionMethod()
        except splunk.ResourceNotFound:
            resp.addError(
                _("Splunk could not find a job with a job id of %s.") % searchId,
                sid=searchId)
            response = False
        resp.data.append({"sid": searchId, "action": action, "response": response})

    # the mangled source dropped the final return; without it the handler
    # would send an empty body despite resp.data being populated
    return self.render_json(resp)
def controlJob(self, sid, ctl, action, wait=True, ttl=None):
    """
    Executes control for a given job.

    NOTE(review): only the ACL-toggling actions (makeWorldReadable /
    undoWorldReadable) are present in this source fragment; handling for
    other actions is not visible here.
    """
    resp = JsonResponse()

    # SDK does not have any functionality to change ACLs, so i have
    # had to add these custom actions for now.
    # TODO - if/when SDK has support for changing ACL's, rewrite this
    # code to not use the Entity class.
    if action in ("makeWorldReadable", "undoWorldReadable"):
        jobPath = "search/jobs/" + sid
        entityName = "acl"
        aclEntity = en.getEntity(
            jobPath,
            entityName,
            namespace="system",
            owner=splunk.auth.getCurrentUser()['name'])

        # world-readable means the wildcard role gets read permission
        if action == "makeWorldReadable":
            aclEntity.properties['perms.read'] = ["*"]
        else:
            aclEntity.properties['perms.read'] = []

        try:
            en.setEntity(aclEntity, uri=jobPath + "/acl")
        except Exception as e:
            logger.exception(e)
            resp.addError(
                _("Splunk could not update permissions for this job"))
            return self.render_json(resp)
def controlJob(self, sid, ctl, action, ttl=None):
    """
    Executes control for a given job.

    NOTE(review): only the ACL-toggling actions (makeWorldReadable /
    undoWorldReadable) are present in this source fragment; handling for
    other actions is not visible here.
    """
    resp = JsonResponse()

    # SDK does not have any functionality to change ACLs, so i have
    # had to add these custom actions for now.
    # TODO - if/when SDK has support for changing ACL's, rewrite this
    # code to not use the Entity class.
    if action in ("makeWorldReadable", "undoWorldReadable"):
        jobPath = "search/jobs/" + sid
        entityName = "acl"
        aclEntity = en.getEntity(
            jobPath,
            entityName,
            namespace="system",
            owner=splunk.auth.getCurrentUser()["name"]
        )

        # world-readable means the wildcard role gets read permission
        if action == "makeWorldReadable":
            aclEntity.properties["perms.read"] = ["*"]
        else:
            aclEntity.properties["perms.read"] = []

        try:
            en.setEntity(aclEntity, uri=jobPath + "/acl")
        except Exception as e:
            logger.exception(e)
            resp.addError(_("Splunk could not update permissions for this job"))
            return self.render_json(resp)
def listJobs(self, restrictToSession=True, nocache=False, s=None, cachebuster=None, wait=True):
    '''
    Returns a listing of jobs that the client needs to be aware of;
    listing is restricted by user session, and optionally filtered by a
    whitelist provided by the client

    s    -- a single sid string or a list of sids to look up
    wait -- when truthy, block until each job leaves the RUNNING state
    '''
    resp = JsonResponse()

    # dump out if no jobs are specified
    if not s:
        resp.data = []
        return self.render_json(resp)

    # normalize a single string into a list
    if isinstance(s, basestring):
        s = [s]

    # bypass the legacy sdk blocking for RUNNING state
    wait = splunk.util.normalizeBoolean(wait)

    # loop over all requested jobs and ask server for status
    listing = []
    for requestSID in s:
        try:
            job = splunk.search.getJob(requestSID, waitForRunning=wait)
            listing.append(job.toJsonable())
        except splunk.ResourceNotFound:
            listing.append({'sid': requestSID, '__notfound__': True})
            # ensure we always bust the cache otherwise, multiple requests
            # may not find out that the job doesn't exist
            nocache = True
            resp.addError(
                _('Splunk could not find a job with sid=%s.') % requestSID)
        except Exception as e:
            logger.exception(e)
            resp.success = False
            resp.addError(str(e))

    # attach the collected statuses; the mangled source built `listing`
    # but never handed it to the response
    resp.data = listing
    return self.render_json(resp)
def field_actions(self, actions, namespace, sid, offset, field_name=None, field_value=None, latest_time=None, view=None, **args):
    '''
    Returns a set of field action response for a given field or event.

    Assumptions:
    1) If field_name and field_value are provided, the data structure
       returned is representative of the single field.
    2) If either both field_name and field_value are absent, the data
       structure returned is assumed to be for the entire event.
    '''

    # These map to the workflow_actions.conf spec, just to simplify changes
    # in the spec
    FA_CONF = '/data/ui/workflow-actions'
    FA_TYPE = 'type'
    FA_LABEL = 'label'
    FA_FIELDS = 'fields'
    FA_EVENTTYPES = 'eventtypes'
    FA_DISPLAY_LOCATION = 'display_location'
    FA_DISPLAY_LOCATION_FIELD_MENU = 'field_menu'
    FA_DISPLAY_LOCATION_EVENT_MENU = 'event_menu'
    FA_DISPLAY_LOCATION_BOTH = 'both'
    FA_DISPLAY_LOCATION_DEFAULT = FA_DISPLAY_LOCATION_BOTH

    # This ensures that only enabled field actions are retrieved
    FA_FIELD_ACTION_SEARCH = "disabled=0"

    EVENTTYPE_FIELD_NAME = 'eventtype'

    resp = JsonResponse()

    # the event offset must be numeric; bail out with a JSON error otherwise
    try:
        offset = int(offset)
    except ValueError:
        msg = _(
            "The job's event offset must be a valid integer in order to generate field actions."
        )
        logger.error(msg)
        resp.addError(msg)
        return self.render_json(resp)
    # NOTE(review): the remainder of this function is not present in this
    # source fragment.
def listJobs(self, restrictToSession=True, nocache=False, s=None, cachebuster=None):
    """
    Returns a listing of jobs that the client needs to be aware of;
    listing is restricted by user session, and optionally filtered by a
    whitelist provided by the client

    s -- a single sid string or a list of sids to look up
    """
    resp = JsonResponse()

    # dump out if no jobs are specified
    if not s:
        resp.data = []
        return self.render_json(resp)

    # normalize a single string into a list
    if isinstance(s, basestring):
        s = [s]

    # loop over all requested jobs and ask server for status
    listing = []
    for requestSID in s:
        try:
            job = splunk.search.getJob(requestSID)
            listing.append(job.toJsonable())
        except splunk.ResourceNotFound:
            listing.append({"sid": requestSID, "__notfound__": True})
            # ensure we always bust the cache otherwise, multiple requests
            # may not find out that the job doesn't exist
            nocache = True
            resp.addError(_("Splunk could not find a job with sid=%s.") % requestSID)
        except Exception as e:
            logger.exception(e)
            resp.success = False
            resp.addError(str(e))

    # attach the collected statuses; the mangled source built `listing`
    # but never handed it to the response
    resp.data = listing
    return self.render_json(resp)
def listJobs(self, restrictToSession=True, nocache=False, s=None, cachebuster=None, wait=True):
    '''
    Returns a listing of jobs that the client needs to be aware of;
    listing is restricted by user session, and optionally filtered by a
    whitelist provided by the client

    s    -- a single sid string or a list of sids to look up
    wait -- when truthy, block until each job leaves the RUNNING state
    '''
    resp = JsonResponse()

    # dump out if no jobs are specified
    if not s:
        resp.data = []
        return self.render_json(resp)

    # normalize a single string into a list
    if isinstance(s, basestring):
        s = [s]

    # bypass the legacy sdk blocking for RUNNING state
    wait = splunk.util.normalizeBoolean(wait)

    # loop over all requested jobs and ask server for status
    listing = []
    for requestSID in s:
        try:
            job = splunk.search.getJob(requestSID, waitForRunning=wait)
            listing.append(job.toJsonable())
        except splunk.ResourceNotFound:
            listing.append({'sid': requestSID, '__notfound__': True})
            # ensure we always bust the cache otherwise, multiple requests
            # may not find out that the job doesn't exist
            nocache = True
            resp.addError(_('Splunk could not find a job with sid=%s.') % requestSID)
        except Exception as e:
            logger.exception(e)
            resp.success = False
            resp.addError(str(e))

    # attach the collected statuses; the mangled source built `listing`
    # but never handed it to the response
    resp.data = listing
    return self.render_json(resp)
def field_actions(self, actions, namespace, sid, offset, field_name=None, field_value=None, latest_time=None, view=None, **args):
    '''
    Returns a set of field action response for a given field or event.

    Assumptions:
    1) If field_name and field_value are provided, the data structure
       returned is representative of the single field.
    2) If either both field_name and field_value are absent, the data
       structure returned is assumed to be for the entire event.
    '''

    # These map to the workflow_actions.conf spec, just to simplify changes
    # in the spec
    FA_CONF = '/data/ui/workflow-actions'
    FA_TYPE = 'type'
    FA_LABEL = 'label'
    FA_FIELDS = 'fields'
    FA_EVENTTYPES = 'eventtypes'
    FA_DISPLAY_LOCATION = 'display_location'
    FA_DISPLAY_LOCATION_FIELD_MENU = 'field_menu'
    FA_DISPLAY_LOCATION_EVENT_MENU = 'event_menu'
    FA_DISPLAY_LOCATION_BOTH = 'both'
    FA_DISPLAY_LOCATION_DEFAULT = FA_DISPLAY_LOCATION_BOTH

    # This ensures that only enabled field actions are retrieved
    FA_FIELD_ACTION_SEARCH = "disabled=0"

    EVENTTYPE_FIELD_NAME = 'eventtype'

    resp = JsonResponse()

    # the event offset must be numeric; bail out with a JSON error otherwise
    try:
        offset = int(offset)
    except ValueError:
        msg = _("The job's event offset must be a valid integer in order to generate field actions.")
        logger.error(msg)
        resp.addError(msg)
        return self.render_json(resp)
    # NOTE(review): the remainder of this function is not present in this
    # source fragment.
def batchControl(self, ctl, sid=None, action=None, ttl=None, **kw):
    '''
    Apply a control action to one or more search jobs and report the
    per-job outcome.

    sid    -- a single job id or a list of job ids (required)
    action -- one of JOB_ACTIONS, or 'ttl' to update the job lifetime
    ttl    -- new time-to-live, only used when action == 'ttl'

    Returns a JSON payload with one {sid, action, response} entry per job.
    Responds 400 when sid/action are missing or the action is not allowed.
    '''
    resp = JsonResponse()

    # both a job id and a whitelisted action are required
    if sid is None or action is None or action not in JOB_ACTIONS:
        cherrypy.response.status = 400
        resp.success = False
        resp.addError(
            _('You must provide a job id(s) and a valid action.'))
        return self.render_json(resp)

    # normalize a single sid into a list so one loop handles both forms
    if not isinstance(sid, list):
        sid = [sid]

    resp.data = []
    action = action.lower()

    for searchId in sid:
        try:
            job = splunk.search.getJob(
                searchId, sessionKey=cherrypy.session['sessionKey'])
            if action == 'ttl':
                response = job.setTTL(ttl)
            else:
                # the JOB_ACTIONS whitelist guarantees this resolves to a
                # control method on the job object
                actionMethod = getattr(job, action)
                response = actionMethod()
        except splunk.ResourceNotFound:
            resp.addError(
                _('Splunk could not find a job with a job id of %s.') % searchId,
                sid=searchId)
            response = False
        resp.data.append({
            'sid': searchId,
            'action': action,
            'response': response
        })

    # the mangled source dropped the final return; without it the handler
    # would send an empty body despite resp.data being populated
    return self.render_json(resp)
def getJobAsset(self, sid, asset, **kwargs):
    """
    Returns specific asset for a given job.

    sid   -- search job id
    asset -- asset name to fetch (e.g. results, events, timeline)

    Honors outputMode (xml/json/raw/csv), download headers via isDownload,
    and optional server-side XSL transforms requested by a module.
    """
    job_lite = splunk.search.JobLite(sid)

    # set response type; default to XML output
    if "outputMode" not in kwargs:
        kwargs["outputMode"] = "xml"
    outputMode = kwargs["outputMode"]
    if outputMode == "json":
        ct = MIME_JSON
    elif outputMode == "raw":
        ct = MIME_TEXT
    elif outputMode == "csv":
        ct = MIME_CSV
    else:
        outputMode = "xml"
        ct = MIME_XML
    cherrypy.response.headers["content-type"] = ct

    # if we're exporting, set the correct headers, to get the brower to show
    # a download dialog. also hardlimit the export cap to 10,000 events.
    if "isDownload" in kwargs:
        if outputMode == "raw":
            extension = "txt"
        else:
            extension = outputMode
        # prefer a caller-supplied filename, otherwise derive one from the sid
        if "filename" in kwargs and len(kwargs["filename"]) > 0:
            if kwargs["filename"].find(".") > -1:
                filename = kwargs["filename"]
            else:
                filename = "%s.%s" % (kwargs["filename"], extension)
        else:
            filename = "%s.%s" % (sid.replace(".", "_"), extension)

        # sanitize filenames: strip CR/LF/quotes (header injection) and cap
        # the length
        clean_filename = re.split(r'[\r\n;"\']+', filename)[0]
        clean_filename = clean_filename[:255]
        cherrypy.response.headers["content-type"] = "application/force-download"
        cherrypy.response.headers["content-disposition"] = 'attachment; filename="%s"' % clean_filename

        rs = job_lite.getResults("results_preview", 0, 1)
        # by default, exclude underscore fields except time and raw
        if "field_list" not in kwargs:
            if not rs:
                resp = JsonResponse()
                cherrypy.response.status = 404
                resp.success = False
                resp.addError("job sid=%s not found" % sid)
                return self.render_json(resp)
            kwargs["field_list"] = [
                x for x in rs.fieldOrder() if (not x.startswith("_") or x == "_time" or x == "_raw")
            ]
        job = splunk.search.getJob(sid)
        return self.streamJobExport(job, asset, **kwargs)

    # set default time format
    if "time_format" not in kwargs and "timeFormat" not in kwargs:
        kwargs["time_format"] = cherrypy.config.get("DISPATCH_TIME_FORMAT")

    # SPL-34380, if the url will be too long, remove the field_list value.
    # This is just a bandaid for now, a better solution involves splunkd
    # patching.
    url_len = len(urllib.urlencode(kwargs))
    if url_len > 8192:  # Max url length
        logger.warn("field_list argument removed in REST call to shorten URL")
        kwargs.pop("field_list", None)
        kwargs.pop("f", None)

    # pass through the search options
    job_lite.setFetchOption(**kwargs)
    try:
        output = job_lite.get(asset)
    except Exception:
        resp = JsonResponse()
        cherrypy.response.status = 404
        resp.success = False
        resp.addError("job sid=%s not found" % sid)
        return self.render_json(resp)

    # TODO:
    # handle server-side XSL transforms
    moduleName = cherrypy.request.headers.get("X-Splunk-Module", None)
    if "moduleName" in kwargs:
        moduleName = kwargs.get("moduleName")

    if moduleName and ("xsl" in kwargs) and output:
        # NOTE(security): unlike the compat_mode variant of this handler,
        # this path does not confine kwargs['xsl'] to $SPLUNK_HOME — a
        # crafted relative path could escape the module directory; confirm
        # and add the containment check if this variant is live.
        xslFilePath = os.path.join(self.moduleRoster[moduleName]["path"], kwargs["xsl"])
        with open(xslFilePath, "r") as f:
            xslt_doc = et.parse(f)

        # generate transformer
        transform = et.XSLT(xslt_doc)

        # transform the XML
        xmlDoc = et.fromstring(output)
        transformedOutput = transform(xmlDoc)
        cherrypy.response.headers["content-type"] = MIME_HTML
        html = et.tostring(transformedOutput)
        if not html:
            output = "Loading..."
        else:
            output = html

    # This handles the edge case when output returns no results but
    # a content-type of html is still expected, say by jQuery's $.ajax
    # method. This could be avoided if the response returned a valid
    # xml document while maintaining a content-type of xml. Currently
    # empty results are rendered as content-length 0 which jQuery fails
    # on parsing, as it expects xml.
    elif moduleName and ("xsl" in kwargs) and not output:
        logger.debug(
            "Search api got xsl request, but no search results "
            + "were returned. Setting content-type to html anyway"
        )
        cherrypy.response.headers["content-type"] = MIME_HTML
        output = "Loading..."

    # otherwise, return raw contents
    if util.apply_etag(output):
        return None
    else:
        return output
def getJobAsset(self, sid, asset, compat_mode=True, **kwargs):
    '''
    Returns specific asset for a given job

    compat_mode: When enabled results in JSON transformed to 4.X variant
                 for results, events and results_preview asset types.
    '''
    compat_mode = splunk.util.normalizeBoolean(compat_mode)

    job_lite = splunk.search.JobLite(sid)

    # set response type; default to XML output
    if 'outputMode' not in kwargs:
        kwargs['outputMode'] = 'xml'
    outputMode = kwargs['outputMode']
    if outputMode == 'json':
        ct = splunk.appserver.mrsparkle.MIME_JSON
    elif outputMode == 'raw':
        ct = MIME_TEXT
    elif outputMode == 'csv':
        ct = MIME_CSV
    else:
        outputMode = 'xml'
        ct = MIME_XML
    cherrypy.response.headers['content-type'] = ct

    # if we're exporting, set the correct headers, to get the brower to show
    # a download dialog. also hardlimit the export cap to 10,000 events.
    if 'isDownload' in kwargs:
        if outputMode == 'raw':
            extension = 'txt'
        else:
            extension = outputMode
        # prefer a caller-supplied filename, otherwise derive one from the sid
        if 'filename' in kwargs and len(kwargs["filename"]) > 0:
            if kwargs['filename'].find('.') > -1:
                filename = kwargs['filename']
            else:
                filename = "%s.%s" % (kwargs['filename'], extension)
        else:
            filename = "%s.%s" % (sid.replace('.', '_'), extension)

        # sanitize filenames: strip CR/LF/quotes (header injection), cap the
        # length and avoid spaces
        clean_filename = re.split(r'[\r\n;"\']+', filename.encode("utf-8"))[0]
        clean_filename = clean_filename[:255]
        clean_filename = clean_filename.replace(' ', '_')
        cherrypy.response.headers['content-type'] = 'application/force-download'
        cherrypy.response.headers['content-disposition'] = 'attachment; filename="%s"' % clean_filename

        rs = job_lite.getResults('results_preview', 0, 1)
        # by default, exclude underscore fields except time and raw
        if 'field_list' not in kwargs:
            if not rs:
                resp = JsonResponse()
                cherrypy.response.status = 404
                resp.success = False
                resp.addError("job sid=%s not found" % sid)
                return self.render_json(resp)
            kwargs['field_list'] = [x for x in rs.fieldOrder() if (not x.startswith('_') or x == '_time' or x == '_raw')]
        job = splunk.search.getJob(sid)
        # rejoined: the mangled source split this across a bare `return`
        return self.streamJobExport(job, asset, **kwargs)

    # set default time format
    if 'time_format' not in kwargs and 'timeFormat' not in kwargs:
        kwargs['time_format'] = cherrypy.config.get('DISPATCH_TIME_FORMAT')

    # SPL-34380, if the url will be too long, remove the field_list value.
    # This is just a bandaid for now, a better solution involves splunkd
    # patching.
    url_len = len(urllib.urlencode(kwargs))
    if url_len > 8192:  # Max url length
        logger.warn('field_list argument removed in REST call to shorten URL')
        kwargs.pop('field_list', None)
        kwargs.pop('f', None)

    # pass through the search options
    job_lite.setFetchOption(**kwargs)

    try:
        output = job_lite.get(asset)
    except Exception:
        resp = JsonResponse()
        cherrypy.response.status = 404
        resp.success = False
        resp.addError("job sid=%s not found" % sid)
        return self.render_json(resp)

    # TODO:
    # handle server-side XSL transforms
    moduleName = cherrypy.request.headers.get('X-Splunk-Module', None)
    if 'moduleName' in kwargs:
        moduleName = kwargs.get('moduleName')

    if outputMode == 'json' and output and compat_mode and asset in ['results_preview', 'results', 'events']:
        # transform json to pre-5.0 format for backwards compliance
        try:
            data = json.loads(output)
        except ValueError:
            pass
        else:
            output = json.dumps(data.get('results', []))
    elif moduleName and ('xsl' in kwargs) and output:
        #logger.debug('search api got xsl request: %s' % moduleName)

        # resolve the XSL file and refuse anything that escapes
        # $SPLUNK_HOME (path-traversal guard)
        xslFilePath = os.path.abspath(os.path.expandvars(os.path.join(self.moduleRoster[moduleName]['path'], kwargs['xsl'])))
        splunkHomePath = os.path.expandvars('$SPLUNK_HOME')
        if xslFilePath.startswith(splunkHomePath):
            try:
                with open(xslFilePath, 'r') as f:
                    xslt_doc = et.parse(f)

                # generate transformer
                transform = et.XSLT(xslt_doc)

                # transform the XML
                xmlDoc = et.fromstring(output)
                transformedOutput = transform(xmlDoc)
                cherrypy.response.headers['content-type'] = MIME_HTML
                html = et.tostring(transformedOutput)
                if not html:
                    output = 'Loading...'
                else:
                    output = html
            except Exception:
                cherrypy.response.headers['content-type'] = MIME_HTML
                logger.warn('Exception occurred while transforming xml results -')
                output = 'Error occurred while performing xslt transform on results. Please check the logs for errors.'
        else:
            cherrypy.response.headers['content-type'] = MIME_HTML
            logger.warn('File xsl="%s" is out of $SPLUNK_HOME="%s"' % (xslFilePath, splunkHomePath))
            output = 'The file you are trying to access is not under the $SPLUNK_HOME directory'
    # NOTE(review): the tail of this function (empty-output handling and the
    # etag/raw return) is not present in this source fragment.
def getJobAsset(self, sid, asset, compat_mode=True, **kwargs):
    '''
    Returns specific asset for a given job

    compat_mode: When enabled results in JSON transformed to 4.X variant
                 for results, events and results_preview asset types.
    '''
    compat_mode = splunk.util.normalizeBoolean(compat_mode)

    job_lite = splunk.search.JobLite(sid)

    # set response type; default to XML output
    if 'outputMode' not in kwargs:
        kwargs['outputMode'] = 'xml'
    outputMode = kwargs['outputMode']
    if outputMode == 'json':
        ct = splunk.appserver.mrsparkle.MIME_JSON
    elif outputMode == 'raw':
        ct = MIME_TEXT
    elif outputMode == 'csv':
        ct = MIME_CSV
    else:
        outputMode = 'xml'
        ct = MIME_XML
    cherrypy.response.headers['content-type'] = ct

    # if we're exporting, set the correct headers, to get the brower to show
    # a download dialog. also hardlimit the export cap to 10,000 events.
    if 'isDownload' in kwargs:
        if outputMode == 'raw':
            extension = 'txt'
        else:
            extension = outputMode
        # prefer a caller-supplied filename, otherwise derive one from the sid
        if 'filename' in kwargs and len(kwargs["filename"]) > 0:
            if kwargs['filename'].find('.') > -1:
                filename = kwargs['filename']
            else:
                filename = "%s.%s" % (kwargs['filename'], extension)
        else:
            filename = "%s.%s" % (sid.replace('.', '_'), extension)

        # sanitize filenames: strip CR/LF/quotes (header injection), cap the
        # length and avoid spaces
        clean_filename = re.split(r'[\r\n;"\']+', filename.encode("utf-8"))[0]
        clean_filename = clean_filename[:255]
        clean_filename = clean_filename.replace(' ', '_')
        cherrypy.response.headers[
            'content-type'] = 'application/force-download'
        cherrypy.response.headers[
            'content-disposition'] = 'attachment; filename="%s"' % clean_filename

        rs = job_lite.getResults('results_preview', 0, 1)
        # by default, exclude underscore fields except time and raw
        if 'field_list' not in kwargs:
            if not rs:
                resp = JsonResponse()
                cherrypy.response.status = 404
                resp.success = False
                resp.addError("job sid=%s not found" % sid)
                return self.render_json(resp)
            kwargs['field_list'] = [
                x for x in rs.fieldOrder()
                if (not x.startswith('_') or x == '_time' or x == '_raw')
            ]
        job = splunk.search.getJob(sid)
        # rejoined: the mangled source split this across a bare `return`
        return self.streamJobExport(job, asset, **kwargs)

    # set default time format
    if 'time_format' not in kwargs and 'timeFormat' not in kwargs:
        kwargs['time_format'] = cherrypy.config.get('DISPATCH_TIME_FORMAT')

    # SPL-34380, if the url will be too long, remove the field_list value.
    # This is just a bandaid for now, a better solution involves splunkd
    # patching.
    url_len = len(urllib.urlencode(kwargs))
    if url_len > 8192:  # Max url length
        logger.warn(
            'field_list argument removed in REST call to shorten URL')
        kwargs.pop('field_list', None)
        kwargs.pop('f', None)

    # pass through the search options
    job_lite.setFetchOption(**kwargs)

    try:
        output = job_lite.get(asset)
    except Exception:
        resp = JsonResponse()
        cherrypy.response.status = 404
        resp.success = False
        resp.addError("job sid=%s not found" % sid)
        return self.render_json(resp)

    # TODO:
    # handle server-side XSL transforms
    moduleName = cherrypy.request.headers.get('X-Splunk-Module', None)
    if 'moduleName' in kwargs:
        moduleName = kwargs.get('moduleName')

    if outputMode == 'json' and output and compat_mode and asset in [
        'results_preview', 'results', 'events'
    ]:
        # transform json to pre-5.0 format for backwards compliance
        try:
            data = json.loads(output)
        except ValueError:
            pass
        else:
            output = json.dumps(data.get('results', []))
    elif moduleName and ('xsl' in kwargs) and output:
        #logger.debug('search api got xsl request: %s' % moduleName)

        # resolve the XSL file and refuse anything that escapes
        # $SPLUNK_HOME (path-traversal guard)
        xslFilePath = os.path.abspath(
            os.path.expandvars(
                os.path.join(self.moduleRoster[moduleName]['path'],
                             kwargs['xsl'])))
        splunkHomePath = os.path.expandvars('$SPLUNK_HOME')
        if xslFilePath.startswith(splunkHomePath):
            try:
                with open(xslFilePath, 'r') as f:
                    xslt_doc = et.parse(f)

                # generate transformer
                transform = et.XSLT(xslt_doc)

                # transform the XML
                xmlDoc = et.fromstring(output)
                transformedOutput = transform(xmlDoc)
                cherrypy.response.headers['content-type'] = MIME_HTML
                html = et.tostring(transformedOutput)
                if not html:
                    output = 'Loading...'
                else:
                    output = html
            except Exception:
                cherrypy.response.headers['content-type'] = MIME_HTML
                logger.warn(
                    'Exception occurred while transforming xml results -')
                output = 'Error occurred while performing xslt transform on results. Please check the logs for errors.'
        else:
            cherrypy.response.headers['content-type'] = MIME_HTML
            logger.warn('File xsl="%s" is out of $SPLUNK_HOME="%s"' %
                        (xslFilePath, splunkHomePath))
            output = 'The file you are trying to access is not under the $SPLUNK_HOME directory'
    # NOTE(review): the tail of this function (empty-output handling and the
    # etag/raw return) is not present in this source fragment.