def controlJob(self, sid, ctl, action, wait=True, ttl=None):
    '''
    Executes control for a given job.

    Arguments:
        sid    -- the search job id to operate on
        ctl    -- control payload (unused in the ACL branch shown here)
        action -- control verb; 'makeWorldReadable'/'undoWorldReadable'
                  are handled inline below via direct ACL entity access
        wait   -- accepted for API symmetry with other job endpoints;
                  unused in the visible ACL branch
        ttl    -- time-to-live value; unused in the visible ACL branch

    Returns a rendered JsonResponse on the ACL error path.
    NOTE(review): only the ACL permission actions are visible in this
    chunk; other control actions are presumably handled further down —
    confirm against the full file.
    '''
    resp = JsonResponse()

    # SDK does not have any functionality to change ACLs, so i have
    # had to add these custom actions for now.
    # TODO - if/when SDK has support for changing ACL's, rewrite this
    # code to not use the Entity class.
    if (action == "makeWorldReadable" or action == "undoWorldReadable"):
        jobPath = "search/jobs/" + sid
        entityName = "acl"

        # Fetch the job's ACL entity in the 'system' namespace as the
        # currently authenticated user.
        aclEntity = en.getEntity(
            jobPath,
            entityName,
            namespace="system",
            owner=splunk.auth.getCurrentUser()['name'])

        # '*' in perms.read grants world readability; an empty list
        # revokes it.
        if (action == "makeWorldReadable"):
            aclEntity.properties['perms.read'] = ["*"]
        else:
            aclEntity.properties['perms.read'] = []

        try:
            # POST the updated ACL back; 'result' is not inspected —
            # failure is signaled via the exception path below.
            result = en.setEntity(aclEntity, uri=jobPath + "/acl")
        except Exception, e:
            logger.exception(e)
            resp.addError(
                _("Splunk could not update permissions for this job"))
            return self.render_json(resp)
def listJobs(self, restrictToSession=True, nocache=False, s=None, cachebuster=None, wait=True): ''' Returns a listing of jobs that the client needs to be aware of; listing is restricted by user session, and optionally filtered by a whitelist provided by the client ''' resp = JsonResponse() # dump out if no jobs are specified if not s: resp.data = [] return self.render_json(resp) if 0: uri = en.buildEndpoint('search/jobs', '') logger.error("uri: %s" % uri) serverResponse, serverContent = rest.simpleRequest( uri, getargs={ 'id': s, 'output_mode': 'json' }) return serverContent # normalize a single string into a list if isinstance(s, basestring): s = [s] # bypass the legacy sdk blocking for RUNNING state wait = splunk.util.normalizeBoolean(wait) # loop over all all requested jobs and ask server for status listing = [] for requestSID in s: try: job = splunk.search.getJob(requestSID, waitForRunning=wait) listing.append(job.toJsonable()) except splunk.ResourceNotFound: listing.append({'sid': requestSID, '__notfound__': True}) nocache = True # ensure we always bust the cache otherwise, multiple requests may not find out that the job doesn't exist resp.addError( _('Splunk could not find a job with sid=%s.') % requestSID) except Exception, e: logger.exception(e) resp.success = False resp.addError(str(e)) return self.render_json(resp)
def dispatchJob(self, wait=True, **kwargs):
    '''
    Dispatches a new job.

    Arguments:
        wait   -- when truthy, block until the dispatched job reaches
                  the RUNNING state
        kwargs -- dispatch arguments; must include everything listed in
                  REQUIRED_DISPATCH_ARGUMENTS ('search',
                  'statusBucketCount', 'earliestTime', 'latestTime')

    Raises cherrypy.HTTPError(400) when required arguments are missing.
    NOTE(review): this chunk ends inside the exception handler; the
    final render of 'resp' presumably follows — confirm in the full file.
    '''
    if not set(kwargs.keys()) >= set(REQUIRED_DISPATCH_ARGUMENTS):
        raise cherrypy.HTTPError(
            status=400,
            message=
            "Missing one or more of the required arguments: 'search', 'statusBucketCount', 'earliestTime', 'latestTime'."
        )

    # setup the dispatch args; copy so the caller's kwargs are untouched
    options = kwargs.copy()
    q = options['search']
    del options['search']

    # cap the number of events exported when the caller didn't specify
    if 'maxEvents' not in options:
        options['maxEvents'] = EXPORT_HARDLIMIT

    # ensure that owner and namespace contexts are passed
    if 'owner' not in options:
        options['owner'] = cherrypy.session['user'].get('name')
    if 'namespace' not in options:
        options['namespace'] = splunk.getDefault('namespace')
        logger.warn(
            'search was dispatched without a namespace specified; defaulting to "%s"'
            % options['namespace'])

    # Add the default time format
    options['time_format'] = cherrypy.config.get('DISPATCH_TIME_FORMAT')

    # bypass the legacy sdk blocking for RUNNING state
    wait = splunk.util.normalizeBoolean(wait)
    options["waitForRunning"] = wait

    resp = JsonResponse()

    try:
        logger.debug('q=%s' % q)
        logger.debug('options=%s' % options)
        # We're not going to read/write further from the user's session at this point...if we do, acquire the lock`
        # This can take significant time when there is a subsearch
        cherrypy.session.release_lock()
        job = splunk.search.dispatch(
            q, sessionKey=cherrypy.session['sessionKey'], **options)
        # on success the response payload is just the new job's id
        resp.data = job.id
    except splunk.SplunkdConnectionException, e:
        logger.exception(e)
        resp.success = False
        resp.addFatal(str(e))
def field_actions(self, actions, namespace, sid, offset, field_name=None, field_value=None, latest_time=None, view=None, **args): ''' Returns a set of field action response for a given field or event. Assumptions: 1) If field_name and field_value are provided, the data structure returned is representative of the single field. 2) If either both field_name and field_value are absent, the data structure returned is assumed to be for the entire event. ''' # These map to the workflow_actions.conf spec, just to simplify changes in the spec FA_CONF = '/data/ui/workflow-actions' FA_TYPE = 'type' FA_LABEL = 'label' FA_FIELDS = 'fields' FA_EVENTTYPES = 'eventtypes' FA_DISPLAY_LOCATION = 'display_location' FA_DISPLAY_LOCATION_FIELD_MENU = 'field_menu' FA_DISPLAY_LOCATION_EVENT_MENU = 'event_menu' FA_DISPLAY_LOCATION_BOTH = 'both' FA_DISPLAY_LOCATION_DEFAULT = FA_DISPLAY_LOCATION_BOTH # This ensures that only enabled field actions are retrieved FA_FIELD_ACTION_SEARCH = "disabled=0" EVENTTYPE_FIELD_NAME = 'eventtype' resp = JsonResponse() try: offset = int(offset) except ValueError, e: msg = _( "The job's event offset must be a valid integer in order to generate field actions." ) logger.error(msg) resp.addError(msg) return self.render_json(resp)
def uiindex(self, **kwargs):
    '''
    JSONResponse envelope of message data.

    Pops a single UI message (identified by kwargs['id']) from the UI
    message pool and, when present, copies its severity and text onto
    the response envelope.

    NOTE(review): this chunk ends inside the exception handler; the
    final render of 'resp' presumably follows — confirm in the full file.
    '''
    resp = JsonResponse()

    try:
        # pop removes the message from the pool so it is shown only once
        msg = MsgPoolMgr.get_poolmgr_instance()[UI_MSG_POOL].pop(
            kwargs['id'])
        if msg:
            resp.addMessage(msg.severity.upper(), msg.text)
    except splunk.SplunkdConnectionException, e:
        logger.exception(e)
        resp.success = False
        resp.addFatal(
            _('The appserver was unable to connect to splunkd. Check if splunkd is still running. (%s)'
              ) % e.message)
def index(self, action, **args):
    '''
    JSONResponse envelope of message data.
    URL: /api/messages/index/

    Collects persistent splunkd messages (from the /messages endpoint)
    and session-queued messages, folds all restart_required_reason
    entries into the restart_required message, and emits everything as
    warnings/messages on the JSON envelope.

    NOTE(review): this chunk ends inside the exception handler; the
    final render of 'resp' presumably follows — confirm in the full file.
    '''
    resp = JsonResponse()
    uri = '/messages'

    try:
        entries = splunk.entity.getEntities(uri)
        msgs = lib.message.get_session_queue().get_all()

        # Collect all reasons that a restart is required.
        # (py2 .items() returns a list copy, so deleting from 'entries'
        # while iterating here is safe)
        restart_reason = ''
        for idx, entry in entries.items():
            if idx.startswith('restart_required_reason'):
                restart_reason += ' '
                restart_reason += str(entry[idx])
                # Remove individual reason message.
                del entries[idx]

        for idx, entry in entries.iteritems():
            # NOTE(review): entry is indexed by its own entity name here
            # — presumably the message text lives under that key in the
            # entity; verify against the /messages schema
            mapped_msg = self.msg_map.get(entry[idx], entry[idx])
            if (idx == 'restart_required') and (len(restart_reason) > 0):
                # Combine restart_required and restart_required_reason(s)
                mapped_msg += restart_reason

            # If this message is removable, then append an id
            removeLink = entry.getLink('remove')
            if removeLink is not None:
                id = "/" + "/".join(entry.id.split('/')[3:])
                resp.addWarn(mapped_msg, id=id)
            else:
                resp.addWarn(mapped_msg)

        # flush any messages queued on the user's session
        for msg in msgs:
            resp.addMessage(msg['level'].upper(), msg['message'])
    except splunk.AuthenticationFailed, e:
        logger.debug(
            'client not authenticated; no persistent messages retrieved')
def batchControl(self, ctl, sid=None, action=None, ttl=None, **kw):
    '''
    Applies a single control action to one or more search jobs.

    Arguments:
        ctl    -- control payload (unused in the visible portion)
        sid    -- a single job id or a list of job ids
        action -- action name; must be one of JOB_ACTIONS. 'ttl' is
                  special-cased; any other action is dispatched to the
                  job method of the same (lowercased) name
        ttl    -- new time-to-live, used only when action == 'ttl'

    Collects per-sid results into resp.data as
    {'sid': ..., 'action': ..., 'response': ...}.
    NOTE(review): this chunk ends inside the loop body; the final render
    of 'resp' presumably follows — confirm in the full file.
    '''
    resp = JsonResponse()

    # both a sid and a whitelisted action are mandatory
    if sid == None or action == None or not action in JOB_ACTIONS:
        cherrypy.response.status = 400
        resp.success = False
        resp.addError(
            _('You must provide a job id(s) and a valid action.'))
        return self.render_json(resp)

    # normalize a single sid into a list
    if not type(sid) == list:
        sid = [sid]

    resp.data = []
    action = action.lower()

    for searchId in sid:
        try:
            job = splunk.search.getJob(
                searchId, sessionKey=cherrypy.session['sessionKey'])
            if action == 'ttl':
                response = job.setTTL(ttl)
            else:
                # dispatch to the job method named after the action
                actionMethod = getattr(job, action)
                response = actionMethod()
        except splunk.ResourceNotFound, e:
            # report the miss but keep processing the remaining sids
            resp.addError(
                _('Splunk could not find a job with a job id of %s.')
                % searchId, sid=searchId)
            response = False
        resp.data.append({
            'sid': searchId,
            'action': action,
            'response': response
        })
def getJobAsset(self, sid, asset, compat_mode=True, **kwargs):
    '''
    Returns specific asset for a given job

    compat_mode: When enabled results in JSON transformed to 4.X variant
                 for results, events and results_preview asset types.

    Arguments:
        sid    -- search job id
        asset  -- asset name to fetch (e.g. 'results', 'events',
                  'results_preview')
        kwargs -- fetch options passed through to the job; recognized
                  keys used here include outputMode, isDownload,
                  filename, field_list, time_format, moduleName, xsl

    NOTE(review): this chunk ends after the XSL handling; the final
    return of 'output' presumably follows — confirm in the full file.
    '''
    compat_mode = splunk.util.normalizeBoolean(compat_mode)

    job_lite = splunk.search.JobLite(sid)

    # set response type; default to XML output
    if 'outputMode' not in kwargs:
        kwargs['outputMode'] = 'xml'
    outputMode = kwargs['outputMode']
    if outputMode == 'json':
        ct = splunk.appserver.mrsparkle.MIME_JSON
    elif outputMode == 'raw':
        ct = MIME_TEXT
    elif outputMode == 'csv':
        ct = MIME_CSV
    else:
        # any unrecognized mode falls back to XML
        outputMode = 'xml'
        ct = MIME_XML
    cherrypy.response.headers['content-type'] = ct

    # if we're exporting, set the correct headers, to get the brower to show a download
    # dialog. also hardlimit the export cap to 10,000 events.
    if 'isDownload' in kwargs:
        if outputMode == 'raw':
            extension = 'txt'
        else:
            extension = outputMode

        # honor a caller-supplied filename, appending the extension only
        # when the name carries no dot; otherwise derive one from the sid
        if kwargs.has_key('filename') and len(kwargs["filename"]) > 0:
            if kwargs['filename'].find('.') > -1:
                filename = kwargs['filename']
            else:
                filename = "%s.%s" % (kwargs['filename'], extension)
        else:
            filename = "%s.%s" % (sid.replace('.', '_'), extension)

        # sanitize filenames: strip header-injection characters, cap the
        # length, and replace spaces
        clean_filename = re.split(r'[\r\n;"\']+',
                                  filename.encode("utf-8"))[0]
        clean_filename = clean_filename[:255]
        clean_filename = clean_filename.replace(' ', '_')

        cherrypy.response.headers[
            'content-type'] = 'application/force-download'
        cherrypy.response.headers[
            'content-disposition'] = 'attachment; filename="%s"' % clean_filename

        # probe for one preview row to learn the job's field order
        rs = job_lite.getResults('results_preview', 0, 1)

        # by default, exclude underscore fields except time and raw
        if 'field_list' not in kwargs:
            if not rs:
                resp = JsonResponse()
                cherrypy.response.status = 404
                resp.success = False
                resp.addError("job sid=%s not found" % sid)
                return self.render_json(resp)
            kwargs['field_list'] = [
                x for x in rs.fieldOrder()
                if (not x.startswith('_') or x == '_time' or x == '_raw')
            ]

        job = splunk.search.getJob(sid)
        # stream the export directly; none of the non-download handling
        # below applies
        return self.streamJobExport(job, asset, **kwargs)

    # set default time format
    if 'time_format' not in kwargs and 'timeFormat' not in kwargs:
        kwargs['time_format'] = cherrypy.config.get('DISPATCH_TIME_FORMAT')

    # SPL-34380, if the url will be too long, remove the field_list value.
    # This is just a bandaid for now, a better solution involves splunkd
    # patching.
    url_len = len(urllib.urlencode(kwargs))
    if url_len > 8192:  # Max url length
        logger.warn(
            'field_list argument removed in REST call to shorten URL')
        kwargs.pop('field_list', None)
        kwargs.pop('f', None)

    # pass through the search options
    job_lite.setFetchOption(**kwargs)

    try:
        output = job_lite.get(asset)
    except:
        # any failure fetching the asset is reported as a 404
        resp = JsonResponse()
        cherrypy.response.status = 404
        resp.success = False
        resp.addError("job sid=%s not found" % sid)
        return self.render_json(resp)

    # TODO:
    # handle server-side XSL transforms
    moduleName = cherrypy.request.headers.get('X-Splunk-Module', None)
    if 'moduleName' in kwargs:
        moduleName = kwargs.get('moduleName')

    if outputMode == 'json' and output and compat_mode and asset in [
            'results_preview', 'results', 'events'
    ]:
        # transform json to pre-5.0 format for backwards compliance
        try:
            data = json.loads(output)
        except:
            # unparseable payload is passed through untouched
            pass
        else:
            output = json.dumps(data.get('results', []))
    elif moduleName and ('xsl' in kwargs) and output:
        #logger.debug('search api got xsl request: %s' % moduleName)

        # get XSL file
        xslFilePath = os.path.abspath(
            os.path.expandvars(
                os.path.join(self.moduleRoster[moduleName]['path'],
                             kwargs['xsl'])))
        splunkHomePath = os.path.expandvars('$SPLUNK_HOME')

        # only allow stylesheets that resolve inside $SPLUNK_HOME
        # (guards against path traversal via the xsl argument)
        if (xslFilePath.startswith(splunkHomePath)):
            try:
                f = open(xslFilePath, 'r')
                xslt_doc = et.parse(f)
                f.close()

                # generate transformer
                transform = et.XSLT(xslt_doc)

                # transform the XML
                xmlDoc = et.fromstring(output)
                transformedOutput = transform(xmlDoc)
                cherrypy.response.headers['content-type'] = MIME_HTML
                html = et.tostring(transformedOutput)
                if not html:
                    output = 'Loading...'
                else:
                    output = html
            except Exception, e:
                cherrypy.response.headers['content-type'] = MIME_HTML
                logger.warn(
                    'Exception occurred while transforming xml results -')
                output = 'Error occurred while performing xslt transform on results. Please check the logs for errors.'
        else:
            cherrypy.response.headers['content-type'] = MIME_HTML
            logger.warn('File xsl="%s" is out of $SPLUNK_HOME="%s"' %
                        (xslFilePath, splunkHomePath))
            output = 'The file you are trying to access is not under the $SPLUNK_HOME directory'