Example #1
def splunk_results_command():
    jobs = service.jobs  # type: ignore
    found = False
    res = []
    for job in jobs:
        if job.sid == demisto.args()['sid']:
            rr = results.ResultsReader(job.results())
            for result in rr:
                if isinstance(result, results.Message):
                    demisto.results({
                        "Type": 1,
                        "ContentsFormat": "json",
                        "Contents": json.dumps(result.message)
                    })
                elif isinstance(result, dict):
                    # Normal events are returned as dicts
                    res.append(result)
            found = True
    if not found:
        demisto.results("Found no job for sid: " + demisto.args()['sid'])
    if found:
        demisto.results({
            "Type": 1,
            "ContentsFormat": "json",
            "Contents": json.dumps(res)
        })
Example #2
def parse_batch_of_results(current_batch_of_results, max_results_to_add):
    parsed_batch_results = []
    batch_dbot_scores = []
    results_reader = results.ResultsReader(
        io.BufferedReader(ResponseReaderWrapper(current_batch_of_results)))
    for item in results_reader:
        if isinstance(item, results.Message):
            if "Error in" in item.message:
                raise ValueError(item.message)
            parsed_batch_results.append(convert_to_str(item.message))

        elif isinstance(item, dict):
            if demisto.get(item, 'host'):
                batch_dbot_scores.append({
                    'Indicator': item['host'],
                    'Type': 'hostname',
                    'Vendor': 'Splunk',
                    'Score': 0,
                    'isTypedIndicator': True
                })
            # Normal events are returned as dicts
            parsed_batch_results.append(item)

        if len(parsed_batch_results) >= max_results_to_add:
            break
    return parsed_batch_results, batch_dbot_scores
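Example #2 (and Example #26 below) wraps the raw response in an io.BufferedReader via a ResponseReaderWrapper that is not shown in the snippet. A minimal sketch of such an adapter, assuming its only job is to expose the SDK's ResponseReader through the io.RawIOBase interface that io.BufferedReader expects:

import io

class ResponseReaderWrapper(io.RawIOBase):
    # Hypothetical adapter: the examples above do not include this class,
    # so the details here are assumptions. It simply forwards reads from
    # the Splunk SDK's ResponseReader so io.BufferedReader can buffer them.
    def __init__(self, response_reader):
        self.response_reader = response_reader

    def readable(self):
        return True

    def close(self):
        self.response_reader.close()

    def readinto(self, b):
        data = self.response_reader.read(len(b))
        b[:len(data)] = data
        return len(data)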
Example #3
    def events_over_time(self, event_name = "", time_range = TimeRange.MONTH, property = ""):
        query = 'search index=%s application=%s event="%s" | timechart span=%s count by %s | fields - _span*' % (
            self.index, self.application_name, (event_name or "*"), 
            time_range,
            (PROPERTY_PREFIX + property) if property else "event",
        )
        job = self.splunk.jobs.create(query, exec_mode="blocking")

        over_time = {}
        reader = results.ResultsReader(job.results())
        for result in reader:
            if isinstance(result, dict):
                # Get the time for this entry
                time = result["_time"]
                del result["_time"]

                # The rest is in the form of [event/property]:count
                # pairs, so we decode those
                for key,count in result.iteritems():
                    # Ignore internal ResultsReader properties
                    if key.startswith("$"):
                        continue

                    entry = over_time.get(key, [])
                    entry.append({
                        "count": int(count or 0),
                        "time": time,
                    })
                    over_time[key] = entry

        return over_time
Example #4
def fetch_incidents(service):
    last_run = demisto.getLastRun() and demisto.getLastRun()['time']
    search_offset = demisto.getLastRun().get('offset', 0)

    incidents = []
    current_time_for_fetch = datetime.utcnow()
    if demisto.get(demisto.params(), 'timezone'):
        timezone = demisto.params()['timezone']
        current_time_for_fetch = current_time_for_fetch + timedelta(
            minutes=int(timezone))

    now = current_time_for_fetch.strftime(SPLUNK_TIME_FORMAT)
    if demisto.get(demisto.params(), 'useSplunkTime'):
        now = get_current_splunk_time(service)
        current_time_in_splunk = datetime.strptime(now, SPLUNK_TIME_FORMAT)
        current_time_for_fetch = current_time_in_splunk

    if len(last_run) == 0:
        fetch_time_in_minutes = parse_time_to_minutes()
        start_time_for_fetch = current_time_for_fetch - timedelta(
            minutes=fetch_time_in_minutes)
        last_run = start_time_for_fetch.strftime(SPLUNK_TIME_FORMAT)

    earliest_fetch_time_fieldname = demisto.params().get(
        "earliest_fetch_time_fieldname", "index_earliest")
    latest_fetch_time_fieldname = demisto.params().get(
        "latest_fetch_time_fieldname", "index_latest")

    kwargs_oneshot = {
        earliest_fetch_time_fieldname: last_run,
        latest_fetch_time_fieldname: now,
        "count": FETCH_LIMIT,
        'offset': search_offset
    }

    searchquery_oneshot = demisto.params()['fetchQuery']

    if demisto.get(demisto.params(), 'extractFields'):
        extractFields = demisto.params()['extractFields']
        extra_raw_arr = extractFields.split(',')
        for field in extra_raw_arr:
            field_trimmed = field.strip()
            searchquery_oneshot = searchquery_oneshot + ' | eval ' + field_trimmed + '=' + field_trimmed

    oneshotsearch_results = service.jobs.oneshot(
        searchquery_oneshot, **kwargs_oneshot)  # type: ignore
    reader = results.ResultsReader(oneshotsearch_results)
    for item in reader:
        inc = notable_to_incident(item)
        incidents.append(inc)

    demisto.incidents(incidents)
    if len(incidents) < FETCH_LIMIT:
        demisto.setLastRun({'time': now, 'offset': 0})
    else:
        demisto.setLastRun({
            'time': last_run,
            'offset': search_offset + FETCH_LIMIT
        })
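The notable_to_incident() helper called above is not part of this snippet. A minimal sketch of what such a conversion could look like, assuming each Splunk result dict is turned into an incident with a name, a timestamp, and the raw event serialized as JSON (the field names are placeholders, not the integration's actual schema):

import json

def notable_to_incident(event):
    # Hypothetical sketch only; the real helper lives in the integration
    # code and may use different field names.
    event = dict(event)
    return {
        'name': event.get('rule_title') or event.get('source') or 'Splunk notable event',
        'occurred': event.get('_time'),
        'rawJSON': json.dumps(event),
    }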
Example #5
 def test_read_from_empty_result_set(self):
     job = self.service.jobs.create(
         "search index=_internal_does_not_exist | head 2")
     while not job.is_done():
         sleep(0.5)
     self.assertEqual(
         0,
         len(list(results.ResultsReader(io.BufferedReader(job.results())))))
Example #6
def pretty():
    reader = results.ResultsReader(sys.stdin)
    while True:
        kind = reader.read()
        if kind == None: break
        if kind == results.RESULT:
            event = reader.value
            pprint(event)
Example #7
def get_data(response):
    reader = results.ResultsReader(response)

    # Running through results dictionary and making key value pairs
    for result in reader:
        for key, value in result.items():
            print key + "=" + value,
        print
Example #8
def pretty(response):
    reader = results.ResultsReader(response)
    while True:
        kind = reader.read()
        if kind == None: break
        if kind == results.RESULT:
            event = reader.value
            pprint(event)
Example #9
def display(response):
    reader = results.ResultsReader(response)
    typer.secho("---- Failed Logins ---", fg=typer.colors.MAGENTA)
    for result in reader:
        if isinstance(result, dict):
            login = ("timestamp={2} user={0} src=:{1}").format(
                result["user"], result["src"], result["timestamp"])
            typer.echo(login)
Example #10
    def splunk(self):
        service = client.connect(host=SPLUNK_HOST, port=SPLUNK_PORT, scheme=SPLUNK_SCHEME, username=SPLUNK_USERNAME,
                                 password=SPLUNK_PASSWORD)
        # Check the Redis connection and reconnect if needed
        self.redis_r = self.tryConnetRedis()

        if 'conf_splunklog_rule' in self.redis_r.hkeys("passive_config"):
            info = eval(self.redis_r.hget('passive_config', 'conf_splunklog_rule'))
        else:
            self.redis_r.hset('passive_config', 'conf_splunklog_rule', rule)
            info = rule

        query = info['query']
        earliest_time = info['earliest_time']
        max_time = info['max_time']
        kwargs_normalsearch = {"exec_mode": "normal",
                               "earliest_time": earliest_time,
                               "latest_time": "now",
                               "max_time": int(max_time),
                               "timeout": 120}

        job = service.jobs.create(query=query, **kwargs_normalsearch)

        while True:
            while not job.is_ready():
                pass
            stats = {"isDone": job["isDone"],
                     "doneProgress": float(job["doneProgress"]) * 100,
                     "scanCount": int(job["scanCount"]),
                     "eventCount": int(job["eventCount"]),
                     "resultCount": int(job["resultCount"])}

            status = ("\r%(doneProgress)03.1f%%   %(scanCount)d scanned   "
                      "%(eventCount)d matched   %(resultCount)d results") % stats

            sys.stdout.write(status)
            sys.stdout.flush()
            if stats["isDone"] == "1":
                break
            time.sleep(2)

        print "\ntotal time: %s" % job["runDuration"]

        r = self.tryConnetRedis()

        for result in results.ResultsReader(job.results()):
            # In a GET request, ng_request_url_short will not actually contain '?', but split it anyway to keep the algorithm consistent
            MD5 = self.md5('GET' + result['ng_request_url_short'].split('?')[0])
            request_json = {
                'method': 'GET' if result['ng_request_url_short'].strip().upper() == 'GET' else 'POST',
                'protocol': 'http://',
                'domain': result['ng_request_domain'],
                'ng_request_url_short': result['ng_request_url_short'],
                'arg': result['ng_query']
            }
            self.redis_r.set('DataSort_' + MD5, request_json)
        r.execute_command("QUIT")
        job.cancel()
Example #11
def run_modified_splunk_search(splunk_host, splunk_password, search1, search2,
                               detection_name, detection_file, earliest_time,
                               latest_time):
    try:
        service = client.connect(host=splunk_host,
                                 port=8089,
                                 username='******',
                                 password=splunk_password)
    except Exception as e:
        print("Unable to connect to Splunk instance: " + str(e))
        return 1, {}

    if not search1.startswith('|'):
        search1 = 'search ' + search1

    if not search2.startswith('|'):
        search2 = 'search ' + search2

    kwargs = {"dispatch.earliest_time": "-1d", "dispatch.latest_time": "now"}

    try:
        job = service.jobs.export(search1, **kwargs)
    except Exception as e:
        print("Unable to execute detection: " + str(e))
        return 1, {}

    reader = results.ResultsReader(job)
    results_search_malicious = []
    for result in reader:
        if isinstance(result, dict):
            results_search_malicious.append(result)

    try:
        job = service.jobs.export(search2, **kwargs)
    except Exception as e:
        print("Unable to execute detection: " + str(e))
        return 1, {}

    reader = results.ResultsReader(job)
    results_search_not_malicious = []
    for result in reader:
        if isinstance(result, dict):
            results_search_not_malicious.append(result)

    return results_search_malicious, results_search_not_malicious
Example #12
    def __assert_transposed_results(self, service, index_name):
        search_results = service.jobs.oneshot(
            f"search index={index_name} | head 10 | transpose")

        reader = results.ResultsReader(search_results)
        for result in reader:
            assert result.get(
                "column"
            ) is not None, f"Attribute 'column' not found in a row {result}"
Example #13
def _build_results_dict_from_sdk_response(response):
    """
    Get results from the SDK and return them.
    """
    reader = results.ResultsReader(response)
    resultset = []
    for result in reader:
        resultset.append(result)
    return resultset
Example #14
def _build_events_from_sdk_response(response):
    """
    Get results from the SDK and return them.
    """
    reader = results.ResultsReader(response)
    events = []
    for result in reader:
        events.append(_build_event_from_results_reader(result))
    return events
Example #15
    def __getIdFromDataRow(self, dataRow):
        result = None
        for result in results.ResultsReader(dataRow.results()):
            pass

        if result is None:
            return None
        else:
            return result[1]["id"]
Example #16
def run_blocking_mode_search(splunk_service, search_string, payload=None):
    # Avoid a shared mutable default argument
    payload = payload or {}
    try:
        job = splunk_service.jobs.create(search_string, **payload)
        print(job.content)
        for result in results.ResultsReader(job.results()):
            print(result)

    except Exception as e:
        print(e)
Example #17
    def search(self,
               spl,
               mode="normal",
               search_args=None,
               verbose=False,
               days=None,
               start_time=None,
               end_time=None):
        '''
        Search Splunk and yield the results as dicts (this method is a generator).

        spl: A string containing the Splunk search in SPL form
        mode: A string specifying the type of Splunk search to run ("normal" or "realtime")
        search_args: A dict containing any additional search parameters to pass to
              the Splunk server.
        days: Search the past X days. If provided, this supersedes both start_time
              and end_time.
        start_time: A datetime() object representing the start of the search
                    window, or a string in Splunk syntax (e.g., "-2d@d"). If used
                    without end_time, the end of the search window is the current time.
        end_time: A datetime() object representing the end of the search window, or a
                  string in Splunk syntax (e.g., "-2d@d"). If used without start_time,
                  the search start will be the earliest timestamp in Splunk.
        verbose: If True, any errors, warnings or other messages encountered
                 by the search process will be printed to stdout.  The default is False
                 (suppress these messages).
        '''
        if not search_args or not isinstance(search_args, dict):
            search_args = dict()
        search_args["search_mode"] = mode

        if days:
            # Search from current time backwards
            search_args["earliest_time"] = "-%dd" % days
        else:
            if start_time:
                # convert to string if it's a datetime
                if isinstance(start_time, datetime):
                    start_time = start_time.isoformat()
                search_args["earliest_time"] = start_time
            if end_time:
                # convert to string if it's a datetime
                if isinstance(end_time, datetime):
                    end_time = end_time.isoformat()
                search_args["latest_time"] = end_time

        # Use the "export" job type, since that's the most reliable way to return possibly large result sets
        export_results = self.splunk_conn.jobs.export(spl, **search_args)

        reader = results.ResultsReader(export_results)

        for res in reader:
            if isinstance(res, dict):
                yield res
            elif isinstance(res, results.Message) and verbose:
                print("Message: %s" % res)
Example #18
def get_mapping_fields_command(service):
    # Create the query to get unique objects
    # The logic is identical to the 'fetch_incidents' command
    type_field = demisto.params().get('type_field', 'source')
    total_parsed_results = []
    search_offset = demisto.getLastRun().get('offset', 0)

    current_time_for_fetch = datetime.utcnow()
    dem_params = demisto.params()
    if demisto.get(dem_params, 'timezone'):
        timezone = dem_params['timezone']
        current_time_for_fetch = current_time_for_fetch + timedelta(
            minutes=int(timezone))

    now = current_time_for_fetch.strftime(SPLUNK_TIME_FORMAT)
    if demisto.get(dem_params, 'useSplunkTime'):
        now = get_current_splunk_time(service)
        current_time_in_splunk = datetime.strptime(now, SPLUNK_TIME_FORMAT)
        current_time_for_fetch = current_time_in_splunk

    fetch_time_in_minutes = parse_time_to_minutes()
    start_time_for_fetch = current_time_for_fetch - timedelta(
        minutes=fetch_time_in_minutes)
    last_run = start_time_for_fetch.strftime(SPLUNK_TIME_FORMAT)

    earliest_fetch_time_fieldname = dem_params.get(
        "earliest_fetch_time_fieldname", "earliest_time")
    latest_fetch_time_fieldname = dem_params.get("latest_fetch_time_fieldname",
                                                 "latest_time")

    kwargs_oneshot = {
        earliest_fetch_time_fieldname: last_run,
        latest_fetch_time_fieldname: now,
        "count": FETCH_LIMIT,
        'offset': search_offset
    }

    searchquery_oneshot = dem_params['fetchQuery']

    if demisto.get(dem_params, 'extractFields'):
        extractFields = dem_params['extractFields']
        extra_raw_arr = extractFields.split(',')
        for field in extra_raw_arr:
            field_trimmed = field.strip()
            searchquery_oneshot = searchquery_oneshot + ' | eval ' + field_trimmed + '=' + field_trimmed

    searchquery_oneshot = searchquery_oneshot + ' | dedup ' + type_field
    oneshotsearch_results = service.jobs.oneshot(
        searchquery_oneshot, **kwargs_oneshot)  # type: ignore
    reader = results.ResultsReader(oneshotsearch_results)
    for item in reader:
        inc = notable_to_incident(item)
        total_parsed_results.append(inc)

    types_map = create_mapping_dict(total_parsed_results, type_field)
    demisto.results(types_map)
Example #19
def check_in_splunk(troll):
    s = client.connect(host=SPLUNK_HOST,
                       port="443",
                       username=SPLUNK_USER,
                       password=SPLUNK_PASS)

    q = f"search index=\"gameday-{TEAM}\" first_name=\"{troll['first_name']}\" last_name=\"{troll['last_name']}\""
    r = s.jobs.oneshot(q)

    return [item for item in results.ResultsReader(r)]
Example #20
 def retrieving_splunk_events(self):
     jobs = self.service.jobs
     entries = list()
     kwargs_blockingsearch = {"exec_mode": "blocking"}
     searchquery_blocking = "search * | head 100"
     job = jobs.create(searchquery_blocking, **kwargs_blockingsearch)
     for result in results.ResultsReader(job.results()):
         print(result)
         entries.append(self.transform_to_logentry(result))
     return entries
Example #21
def timeit():
    start = time.time()
    reader = results.ResultsReader(sys.stdin)
    count = 0
    while True:
        kind = reader.read()
        if kind == None: break
        if kind == results.RESULT: count += 1
    delta = time.time() - start
    print "%d results in %f secs = %f results/sec" % (count, delta, count/delta)
Example #22
 def test_export(self):
     jobs = self.service.jobs
     stream = jobs.export("search index=_internal earliest=-1m | head 3")
     result = results.ResultsReader(stream)
     ds = list(result)
     self.assertEqual(result.is_preview, False)
     self.assertTrue(isinstance(ds[0], dict) or \
                         isinstance(ds[0], results.Message))
     nonmessages = [d for d in ds if isinstance(d, dict)]
     self.assertTrue(len(nonmessages) <= 3)
Example #23
def counts(job, result_key):
    applications = []
    reader = results.ResultsReader(job.results())
    for result in reader:
        if isinstance(result, dict):
            applications.append({
                "name": result[result_key],
                "count": int(result["count"] or 0)
            })
    return applications
Example #24
    def check_accuracy(self, search_string, feature_fields, class_field):
        '''
        check_accuracy(search_string, feature_fields, class_field)

        search_string: string to use in the splunk search to narrow events
        feature_fields: which fields to use to predict
        class_field: field to predict

        returns: accuracy of prediction

        notes: assumes that the classifier is already trained; calls predict on each event.
        '''
        # 1: check that classifier is trained:
        if not self.trained:
            raise RuntimeError('classifier is not trained')

        # 2: predict the search string
        splunk_search = 'search %s | table *' % search_string
        search_kwargs = {
            'timeout': 1000,
            'exec_mode': 'blocking'
        }  #required fields set to all - check API ("rf=*")
        job = self.jobs.create(splunk_search, **search_kwargs)
        result_count = int(job["resultCount"])

        # 3: iterate and tally accuracy
        correct = 0
        total = 0
        offset = 0
        count = 100
        while (offset < result_count):
            print "offset: %s" % offset
            kwargs_paginate = {'count': count, 'offset': offset}
            search_results = job.results(**kwargs_paginate)
            for result in results.ResultsReader(search_results):
                try:
                    if result[class_field] == self.predict(
                            feature_fields, class_field, result):
                        correct += 1
                        total += 1
                    else:
                        total += 1
                except:
                    continue  #tochange

            offset += count
            print "curr acc: %f" % (float(correct) / total)

        # 4: calculate percentage
        perc_correct = float(correct) / total
        self.accuracy = perc_correct
        self.accuracy_tested = True

        # 5: return
        return perc_correct
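A hypothetical call to check_accuracy(), assuming the classifier object (here called clf) has already been trained on the same fields; the search string and field names are placeholders:

# Hypothetical usage; depends on a previously trained classifier 'clf'.
acc = clf.check_accuracy('index=main sourcetype=access_combined',
                         ['status', 'method', 'bytes_out'],
                         'action')
print('accuracy: %f' % acc)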
Example #25
def collect_events(helper, ew):
    global_account = helper.get_arg('global_account')
    cloud = helper.get_arg('cloud')
    apikey = helper.get_arg('apikey')
    username = global_account['username']
    password = global_account['password']

    # Get list of MD5's pending detonation
    #session_key = helper.get_arg('session_key')
    #input_name, input_items = inputs.inputs.popitem()
    session_key = helper.context_meta['session_key']
    #session_key = "dummy"
    md5List = get_md5_list(username, password, session_key)

    #Set envvars based on clear creds
    os.environ["ZIA_USERNAME"] = username
    os.environ["ZIA_PASSWORD"] = password
    os.environ["ZIA_API"] = apikey

    #API Login
    helper.log_info("Login to Zscaler API: %s" % username)

    z = zscaler_python_sdk.zscaler()
    z.get_zia_creds_from_env(True)
    z.set_cloud(cloud)
    z.authenticate_zia_api()

    helper.log_info("Login Success")

    # Get the results and display them using the ResultsReader
    reader = results.ResultsReader(md5List)
    for item in reader:
        if (item["md5"] == "none"):
            helper.log_info("STOP: No queued MD5")
            break
        helper.log_info("Checking Zscaler Sandbox for MD5 : %s" % item["md5"])
        quota = z.check_sandbox_quota()
        #print(quota)
        helper.log_info("Sandbox current quota : %s" % quota)

        while quota['unused'] <= 0:
            quota = z.check_sandbox_quota()
            helper.log_info("waiting 1 sec...\tquota_left[" +
                            str(quota['unused']) + "']")

            time.sleep(1)

        helper.log_info("Loading Zscaler Sandbox for MD5 : %s" % item["md5"])
        report = z.get_sandbox_report(item["md5"], "full")
        #helper.log_info("Sandbox REPORT : %s" % report.text)
        #print(item["md5"])
        event = Event()
        #event.stanza = input_name
        event.data = report.text
        ew.write_event(event)
Example #26
def search_splunk(conn, search):
    jobs = conn.jobs
    kwargs = {'exec_mode': 'blocking'}

    job = jobs.create(search, **kwargs)
    rs = job.results(count=0)

    for result in results.ResultsReader(
            io.BufferedReader(ResponseReaderWrapper(rs))):
        yield result
    job.cancel()
Example #27
 def _exec_search(self, service, search):
     self._logger().info('action=about_to_execute_pending_queries_search search="%s"', search)
     job = service.jobs.create(search, **{'exec_mode': 'blocking'})
     self._logger().debug('action=finish_pending_queries_search job="%s"', job.content)
     if job.content.get('messages') and job.content.get('messages').get('error'):
         raise RuntimeError(job.content.messages['error'])
     else:
         kwargs_paginate = {'count': 0}
         search_results = job.results(**kwargs_paginate)
         reader = results.ResultsReader(search_results)
         return list(reader)
Example #28
def runQurey(keys, *queryname):
    print("runQuery function called")
    job = service.jobs.create(queryname[0], **kwargs_normalsearch)
    while True:
        if job.is_done() == True:
            print("running job")
            break
    output = results.ResultsReader(job.results(count=0))
    job.cancel()
    print("calling dataframe function now")
    myDataframe(output, keys, *queryname)
Example #29
def search(splunk_host, splunk_password, search_name, log):

    print('\nexecute savedsearch: ' + search_name + '\n')

    service = client.connect(host=splunk_host,
                             port=8089,
                             username='******',
                             password=splunk_password)

    # Retrieve the new search
    mysavedsearch = service.saved_searches[search_name]

    kwargs = {
        "disabled": False,
        "dispatch.earliest_time": "-60m",
        "dispatch.latest_time": "now"
    }

    # Enable savedsearch and adapt the scheduling time
    mysavedsearch.update(**kwargs).refresh()

    # Run the saved search
    job = mysavedsearch.dispatch()

    # Create a small delay to allow time for the update between server and client
    sleep(2)

    # Wait for the job to finish--poll for completion and display stats
    while True:
        job.refresh()
        stats = {
            "isDone": job["isDone"],
            "doneProgress": float(job["doneProgress"]) * 100,
            "scanCount": int(job["scanCount"]),
            "eventCount": int(job["eventCount"]),
            "resultCount": int(job["resultCount"])
        }
        status = ("\r%(doneProgress)03.1f%%   %(scanCount)d scanned   "
                  "%(eventCount)d matched   %(resultCount)d results") % stats

        sys.stdout.write(status)
        sys.stdout.flush()
        if stats["isDone"] == "1":
            break
        sleep(2)

    # Get the results and display them
    for result in results.ResultsReader(job.results()):
        print()
        print(result)

    # disable the savedsearch
    kwargs = {"disabled": True}
    mysavedsearch.update(**kwargs).refresh()
Example #30
def getSearch(search, service):
    #print "/*Busqueda: " + search + "*/"
    # Retrieve the new search
    logCreator('INFO', 'Executing search of %s' % search)
    logCreator('DEBUG', 'Trying to get the new search')
    try:
        mysavedsearch = service.saved_searches[search]

        # Run the saved search
        job = mysavedsearch.dispatch()
    except:
        print "Error trying to connect Splunk"
        logCreator('ERROR',
                   'Splunk error: Splunk search %s not found' % search)
        sys.exit(1)
    logCreator('DEBUG', 'Search %s is OK' % search)

    # Create a small delay to allow time for the update between server and client
    sleep(2)

    # Wait for the job to finish--poll for completion and display stats
    while True:
        job.refresh()
        stats = {
            "isDone": job["isDone"],
            "doneProgress": float(job["doneProgress"]) * 100,
            "scanCount": int(job["scanCount"]),
            "eventCount": int(job["eventCount"]),
            "resultCount": int(job["resultCount"])
        }
        status = ("\r%(doneProgress)03.1f%%   %(scanCount)d scanned   "
                  "%(eventCount)d matched   %(resultCount)d results") % stats

        if stats["isDone"] == "1":
            break
        sleep(2)

    # Display the search results now that the job is done
    try:
        jobresults = job.results()
    except:
        logCreator('ERROR', 'No valid answer from Splunk (Invalid licence?)')
        exit(1)

    #print jobresults

    # Finally, we got a reader and the number of results:
    answer = {}
    reader = results.ResultsReader(jobresults)
    answer['Reader'] = reader
    numResults = job["resultCount"]
    answer['Results'] = numResults

    return answer