Example 1
    def batchControl(self, ctl, sid=None, action=None, ttl=None, **kw):
        """
        Applies the given control action to one or more search jobs identified by sid
        """

        resp = JsonResponse()

        if sid is None or action is None or action not in JOB_ACTIONS:
            cherrypy.response.status = 400
            resp.success = False
            resp.addError(_("You must provide a job id(s) and a valid action."))
            return self.render_json(resp)

        if not isinstance(sid, list):
            sid = [sid]

        resp.data = []
        action = action.lower()

        for searchId in sid:
            try:
                job = splunk.search.getJob(searchId, sessionKey=cherrypy.session["sessionKey"])
                if action == "ttl":
                    response = job.setTTL(ttl)
                else:
                    actionMethod = getattr(job, action)
                    response = actionMethod()
            except splunk.ResourceNotFound:
                resp.addError(_("Splunk could not find a job with a job id of %s.") % searchId, sid=searchId)
                response = False
            resp.data.append({"sid": searchId, "action": action, "response": response})
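
Every handler on this page builds a JsonResponse, flips resp.success on failure, collects messages with addError (or addFatal), and puts its payload in resp.data before passing the object to self.render_json. The class itself is not part of these excerpts; the sketch below is only an assumption reconstructed from those call sites, not Splunk's actual implementation.

    # Sketch only: a stand-in for the JsonResponse object used by these handlers.
    # Field and method names are inferred from the call sites above.
    class JsonResponse(object):
        def __init__(self):
            self.success = True    # handlers set this to False on error
            self.data = None       # payload: a job id, a list of job dicts, ...
            self.messages = []     # accumulated error/fatal messages

        def addError(self, text, **kw):
            # kw carries extra context, e.g. sid=<job id> in batchControl
            self.messages.append({"type": "ERROR", "text": text, "context": kw})

        def addFatal(self, text):
            self.messages.append({"type": "FATAL", "text": text})
            self.success = False
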
Example 2
    def listJobs(self,
                 restrictToSession=True,
                 nocache=False,
                 s=None,
                 cachebuster=None,
                 wait=True):
        '''
        Returns a listing of jobs that the client needs to be aware of;
        listing is restricted by user session, and optionally filtered by
        a whitelist provided by the client
        '''

        resp = JsonResponse()

        # dump out if no jobs are specified
        if not s:
            resp.data = []
            return self.render_json(resp)

        if 0:  # dead code path, never executed
            uri = en.buildEndpoint('search/jobs', '')
            logger.error("uri: %s" % uri)
            serverResponse, serverContent = rest.simpleRequest(
                uri, getargs={
                    'id': s,
                    'output_mode': 'json'
                })

            return serverContent

        # normalize a single string into a list
        if isinstance(s, basestring): s = [s]

        # bypass the legacy sdk blocking for RUNNING state
        wait = splunk.util.normalizeBoolean(wait)

        # loop over all requested jobs and ask server for status
        listing = []
        for requestSID in s:
            try:
                job = splunk.search.getJob(requestSID, waitForRunning=wait)
                listing.append(job.toJsonable())

            except splunk.ResourceNotFound:
                listing.append({'sid': requestSID, '__notfound__': True})
                nocache = True  # ensure we always bust the cache; otherwise later requests may not find out that the job doesn't exist
                resp.addError(
                    _('Splunk could not find a job with sid=%s.') % requestSID)

            except Exception as e:
                logger.exception(e)
                resp.success = False
                resp.addError(str(e))
                return self.render_json(resp)
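
A job that cannot be found does not fail the whole request: the entry stays in the listing with an __notfound__ flag and nocache is forced so the miss is never served from cache. A hedged sketch of how a polling client might consume that shape (the surrounding response envelope is an assumption):

    # Sketch only: split a listJobs-style listing into live jobs and missing sids,
    # mirroring the '__notfound__' marker set by the handler above.
    def partition_job_listing(listing):
        live, missing = [], []
        for entry in listing:
            if entry.get('__notfound__'):
                missing.append(entry['sid'])
            else:
                live.append(entry)
        return live, missing

    # e.g. stop polling sids the server reports as gone:
    # live, missing = partition_job_listing(payload['data'])
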
Example 3
    def dispatchJob(self, wait=True, **kwargs):
        '''
        Dispatches a new job
        '''
        if not set(kwargs.keys()) >= set(REQUIRED_DISPATCH_ARGUMENTS):
            raise cherrypy.HTTPError(
                status=400,
                message=
                "Missing one or more of the required arguments: 'search', 'statusBucketCount', 'earliestTime', 'latestTime'."
            )

        # setup the dispatch args
        options = kwargs.copy()
        q = options['search']
        del options['search']

        if 'maxEvents' not in options:
            options['maxEvents'] = EXPORT_HARDLIMIT

        # ensure that owner and namespace contexts are passed
        if 'owner' not in options:
            options['owner'] = cherrypy.session['user'].get('name')
        if 'namespace' not in options:
            options['namespace'] = splunk.getDefault('namespace')
            logger.warn(
                'search was dispatched without a namespace specified; defaulting to "%s"'
                % options['namespace'])

        # Add the default time format
        options['time_format'] = cherrypy.config.get('DISPATCH_TIME_FORMAT')

        # bypass the legacy sdk blocking for RUNNING state
        wait = splunk.util.normalizeBoolean(wait)
        options["waitForRunning"] = wait

        resp = JsonResponse()

        try:
            logger.debug('q=%s' % q)
            logger.debug('options=%s' % options)

            # We're not going to read/write further from the user's session at this point; if we do, acquire the lock
            # This can take significant time when there is a subsearch
            cherrypy.session.release_lock()

            job = splunk.search.dispatch(
                q, sessionKey=cherrypy.session['sessionKey'], **options)
            resp.data = job.id
        except splunk.SplunkdConnectionException as e:
            logger.exception(e)
            resp.success = False
            resp.addFatal(str(e))
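
dispatchJob validates its keyword arguments against REQUIRED_DISPATCH_ARGUMENTS and caps result size with EXPORT_HARDLIMIT, but neither constant appears in these excerpts. The required names can be read off the HTTP 400 message; the limit below is purely an illustrative placeholder:

    # Assumed module-level constants: the argument names come from the HTTP 400
    # message above; the export cap is an illustrative value, not Splunk's default.
    REQUIRED_DISPATCH_ARGUMENTS = ['search', 'statusBucketCount', 'earliestTime', 'latestTime']
    EXPORT_HARDLIMIT = 10000  # applied to maxEvents when the caller omits it
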
Example 4
    def dispatchJob(self, **kwargs):
        """
        Dispatches a new job
        """
        if not set(kwargs.keys()) >= set(REQUIRED_DISPATCH_ARGUMENTS):
            raise cherrypy.HTTPError(
                status=400,
                message="Missing one or more of the required arguments: 'search', 'statusBucketCount', 'earliestTime', 'latestTime'.",
            )

        # setup the dispatch args
        options = kwargs.copy()
        q = options["search"]
        del options["search"]

        if "maxEvents" not in options:
            options["maxEvents"] = EXPORT_HARDLIMIT

        # ensure that owner and namespace contexts are passed
        if "owner" not in options:
            options["owner"] = cherrypy.session["user"].get("name")
        if "namespace" not in options:
            options["namespace"] = splunk.getDefault("namespace")
            logger.warn(
                'search was dispatched without a namespace specified; defaulting to "%s"' % options["namespace"]
            )

        # Add the default time format
        options["time_format"] = cherrypy.config.get("DISPATCH_TIME_FORMAT")

        resp = JsonResponse()

        try:
            try:
                userid = cherrypy.session["user"]["name"]
                querylogger.warn("%s\t%s" % (userid, q))
            except Exception:
                # never let query audit logging break the dispatch
                pass

            logger.debug("q=%s" % q)
            logger.debug("options=%s" % options)
            job = splunk.search.dispatch(q, sessionKey=cherrypy.session["sessionKey"], **options)
            resp.data = job.id
        except splunk.SplunkdConnectionException as e:
            logger.exception(e)
            resp.success = False
            resp.addFatal(str(e))
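
This variant also logs the requesting user and the raw search string to a separate querylogger before dispatching, and swallows any failure there so auditing can never block the search. A minimal sketch of how such an audit logger could be wired up (the logger name, file name, and level are assumptions, not Splunk's configuration):

    # Sketch only: a dedicated audit logger for dispatched searches.
    # The logger name and file handler below are illustrative assumptions.
    import logging

    querylogger = logging.getLogger("splunk.appserver.querylog")
    _handler = logging.FileHandler("query_audit.log")
    _handler.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
    querylogger.addHandler(_handler)
    querylogger.setLevel(logging.WARNING)  # dispatchJob logs the audit line via warn()
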
Example 5
    def dispatchJob(self, wait=True, **kwargs):
        '''
        Dispatches a new job
        '''
        if not set(kwargs.keys()) >= set(REQUIRED_DISPATCH_ARGUMENTS):
            raise cherrypy.HTTPError(status=400, message="Missing one or more of the required arguments: 'search', 'statusBucketCount', 'earliestTime', 'latestTime'.")

        # setup the dispatch args
        options = kwargs.copy()
        q = options['search']
        del options['search']

        if 'maxEvents' not in options:
            options['maxEvents'] = EXPORT_HARDLIMIT
        
        # ensure that owner and namespace contexts are passed
        if 'owner' not in options:
            options['owner'] = cherrypy.session['user'].get('name')
        if 'namespace' not in options:
            options['namespace'] = splunk.getDefault('namespace')
            logger.warn('search was dispatched without a namespace specified; defaulting to "%s"' % options['namespace'])

        # Add the default time format
        options['time_format'] = cherrypy.config.get('DISPATCH_TIME_FORMAT')
        
        # bypass the legacy sdk blocking for RUNNING state
        wait = splunk.util.normalizeBoolean(wait)
        options["waitForRunning"] = wait
     
        resp = JsonResponse()

        try:
            logger.debug('q=%s' % q)
            logger.debug('options=%s' % options)

            # We're not going to read/write further from the user's session at this point; if we do, acquire the lock
            # This can take significant time when there is a subsearch
            cherrypy.session.release_lock()

            job = splunk.search.dispatch(q, sessionKey=cherrypy.session['sessionKey'], **options)
            resp.data = job.id
        except splunk.SplunkdConnectionException as e:
            logger.exception(e)
            resp.success = False
            resp.addFatal(str(e))
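
Compared with the previous example, this version adds the wait parameter, normalized with splunk.util.normalizeBoolean and forwarded as waitForRunning, and releases the CherryPy session lock before dispatching. Query-string values arrive as strings, so the normalization step matters; a rough stand-in for it, offered only as an assumption rather than Splunk's implementation:

    # Sketch only: approximates what a boolean-normalization helper has to do for
    # the string values a browser sends, e.g. wait='0', 'false', 'true', '1'.
    def normalize_boolean_sketch(value):
        if isinstance(value, bool):
            return value
        return str(value).strip().lower() not in ('0', 'false', 'f', 'no', 'n', '')
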
Example 6
    def listJobs(self, restrictToSession=True, nocache=False, s=None, cachebuster=None):
        """
        Returns a listing of jobs that the client needs to be aware of;
        listing is restricted by user session, and optionally filtered by
        a whitelist provided by the client
        """

        resp = JsonResponse()

        # dump out if no jobs are specified
        if not s:
            resp.data = []
            return self.render_json(resp)

        if 0:  # dead code path, never executed
            uri = en.buildEndpoint("search/jobs", "")
            logger.error("uri: %s" % uri)
            serverResponse, serverContent = rest.simpleRequest(uri, getargs={"id": s, "output_mode": "json"})

            return serverContent

        # normalize a single string into a list
        if isinstance(s, basestring):
            s = [s]

        # loop over all requested jobs and ask server for status
        listing = []
        for requestSID in s:
            try:
                job = splunk.search.getJob(requestSID)
                listing.append(job.toJsonable())

            except splunk.ResourceNotFound:
                listing.append({"sid": requestSID, "__notfound__": True})
                nocache = True  # ensure we always bust the cache; otherwise later requests may not find out that the job doesn't exist
                resp.addError(_("Splunk could not find a job with sid=%s.") % requestSID)

            except Exception as e:
                logger.exception(e)
                resp.success = False
                resp.addError(str(e))
                return self.render_json(resp)
Example 7
    def listJobs(self, restrictToSession=True, nocache=False, s=None, cachebuster=None, wait=True):
        '''
        Returns a listing of jobs that the client needs to be aware of;
        listing is restricted by user session, and optionally filtered by
        a whitelist provided by the client
        '''

        resp = JsonResponse()
        
        # dump out if no jobs are specified
        if not s:
            resp.data = []
            return self.render_json(resp)

        if 0:  # dead code path, never executed
            uri = en.buildEndpoint('search/jobs', '')
            logger.error("uri: %s" % uri)
            serverResponse, serverContent = rest.simpleRequest(uri, getargs={'id':s, 'output_mode':'json'})
            
            return serverContent
            
        # normalize a single string into a list
        if isinstance(s, basestring): s = [s]
        
        # bypass the legacy sdk blocking for RUNNING state
        wait = splunk.util.normalizeBoolean(wait)
        
        # loop over all requested jobs and ask server for status
        listing = []
        for requestSID in s:
            try:
                job = splunk.search.getJob(requestSID, waitForRunning=wait)
                listing.append(job.toJsonable())
                
            except splunk.ResourceNotFound:
                listing.append({'sid': requestSID, '__notfound__': True})
                nocache = True  # ensure we always bust the cache; otherwise later requests may not find out that the job doesn't exist
                resp.addError(_('Splunk could not find a job with sid=%s.') % requestSID)
                
            except Exception as e:
                logger.exception(e)
                resp.success = False
                resp.addError(str(e))
                return self.render_json(resp)
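
Both listJobs variants accept nocache and cachebuster arguments so that status polls are not served from a browser or proxy cache. A hedged sketch of the client side of that contract (the endpoint path is an assumption; only the parameter names come from the handler signature):

    # Sketch only: build a listJobs polling URL with a cache-busting parameter.
    # The '/search/jobs/listing' path is assumed; sids are assumed URL-safe.
    import time

    def build_list_jobs_url(base, sids):
        params = ['s=%s' % sid for sid in sids]
        params.append('cachebuster=%d' % int(time.time() * 1000))
        return base + '/search/jobs/listing?' + '&'.join(params)

    # e.g. build_list_jobs_url('http://localhost:8000/api', ['sid1', 'sid2'])
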
Example 8
    def batchControl(self, ctl, sid=None, action=None, ttl=None, **kw):
        '''
        Applies the given control action to one or more search jobs identified by sid
        '''

        resp = JsonResponse()

        if sid is None or action is None or action not in JOB_ACTIONS:
            cherrypy.response.status = 400
            resp.success = False
            resp.addError(
                _('You must provide a job id(s) and a valid action.'))
            return self.render_json(resp)

        if not isinstance(sid, list):
            sid = [sid]

        resp.data = []
        action = action.lower()

        for searchId in sid:
            try:
                job = splunk.search.getJob(
                    searchId, sessionKey=cherrypy.session['sessionKey'])
                if action == 'ttl':
                    response = job.setTTL(ttl)
                else:
                    actionMethod = getattr(job, action)
                    response = actionMethod()
            except splunk.ResourceNotFound:
                resp.addError(
                    _('Splunk could not find a job with a job id of %s.') %
                    searchId,
                    sid=searchId)
                response = False
            resp.data.append({
                'sid': searchId,
                'action': action,
                'response': response
            })
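
batchControl treats action as the name of a method on the job object, with 'ttl' handled as a special case, and validates it against a JOB_ACTIONS whitelist that is not shown in these excerpts. A plausible definition, offered only as an assumption about which job controls the endpoint exposes:

    # Assumed whitelist of permissible control actions; 'ttl' is special-cased above,
    # every other entry is resolved with getattr(job, action) and then called.
    JOB_ACTIONS = ['cancel', 'pause', 'unpause', 'finalize', 'touch', 'ttl']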