log.debug("stdout args for %s: %s" % (incident_config['alert_script'], args_stdout)) log.debug("args for %s: %s" % (incident_config['alert_script'], args)) try: p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=False) output = p.communicate(input=args_stdout) log.debug("Alert script run finished. RC=%s. Output: %s" % (p.returncode, output[0])) except OSError, e: log.debug("Alert script failed. Error: %s" % str(e)) log.info("Creating incident for job_id=%s" % job_id) eh = EventHandler(sessionKey=sessionKey) sh = SuppressionHelper(sessionKey=sessionKey) ############################### # Incident creation starts here # Create unique id incident_id = str(uuid.uuid4()) # Parse results and result_id results = getResults(job_path, incident_id) result_id = getResultId(digest_mode, job_path) # Get urgency from results and parse priority job['urgency'] = readUrgencyFromResults(results, incident_config['urgency'], incident_id) job['priority'] = getPriority(job['impact'], job['urgency'])
# Get job details
job = getJob(job_id, sessionKey)
result_count = job['content']['resultCount']
log.info("Found job for alert '%s' with title '%s'. Context is '%s' with %s results." % (search_name, config['title'], payload.get('app'), result_count))

# Get saved search config
savedSearch = getSavedSearch(payload.get('app'), search_name, sessionKey)
log.debug("Parsed savedsearch settings: expiry=%s digest_mode=%s" % (savedSearch['content']['alert.expires'], savedSearch['content']['alert.digest_mode']))

# Parse ttl
ttl = getTTL(savedSearch['content']['alert.expires'])

# Get helpers
eh = EventHandler(sessionKey=sessionKey)
sh = SuppressionHelper(sessionKey=sessionKey)

# Create unique id
incident_id = str(uuid.uuid4())

# Get results and result id
results = getResults(payload.get('results_file'), incident_id)
result_id = getResultId(savedSearch['content']['alert.digest_mode'], payload.get('results_file'))

# Prepare metadata
metadata = {}
metadata.update({ 'alert': search_name })
metadata.update({ 'alert_time': job['updated'] })
metadata.update({ 'app': payload.get('app') })
#metadata.update({ 'entry': [ job ] })
# The goal here is to reduce event size and limit the job data down to the fields we need
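# Illustrative sketch only of the reduction described above, assuming a
# whitelist approach; the names in _WANTED_JOB_FIELDS are examples, not
# necessarily the fields this app actually keeps.
_WANTED_JOB_FIELDS = ['eventSearch', 'earliestTime', 'latestTime', 'resultCount']

def _reduced_job_entry(job):
    # Keep only whitelisted keys from the job's content dict
    content = job.get('content', {})
    return dict((f, content[f]) for f in _WANTED_JOB_FIELDS if f in content)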
start = time.time()

# Setup logger
log = logging.getLogger('alert_manager_scheduler')
fh = logging.handlers.RotatingFileHandler(os.environ.get('SPLUNK_HOME') + "/var/log/splunk/alert_manager_scheduler.log", maxBytes=25000000, backupCount=5)
formatter = logging.Formatter("%(asctime)-15s %(levelname)-5s %(message)s")
fh.setFormatter(formatter)
log.addHandler(fh)
log.setLevel(logging.INFO)

sessionKey = sys.stdin.readline().strip()
splunk.setDefault('sessionKey', sessionKey)

eh = EventHandler(sessionKey=sessionKey)
sh = SuppressionHelper(sessionKey=sessionKey)

#sessionKey = urllib.unquote(sessionKey[11:]).decode('utf8')
log.debug("Scheduler started. sessionKey=%s" % sessionKey)

#
# Get global settings
#
config = {}
config['index'] = 'alerts'

restconfig = entity.getEntities('configs/alert_manager', count=-1, sessionKey=sessionKey)
if len(restconfig) > 0:
    if 'index' in restconfig['settings']:
        config['index'] = restconfig['settings']['index']
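# Illustrative sketch only: further global settings could be read with the
# same pattern as above, using a dict of defaults overridden by values from
# alert_manager.conf. The setting names in _SETTING_DEFAULTS are
# hypothetical examples, not settings the app necessarily defines.
_SETTING_DEFAULTS = { 'index': 'alerts', 'default_owner': 'unassigned' }

def _read_global_settings(restconfig, defaults=_SETTING_DEFAULTS):
    settings = dict(defaults)
    if len(restconfig) > 0 and 'settings' in restconfig:
        for key in defaults:
            if key in restconfig['settings']:
                settings[key] = restconfig['settings'][key]
    return settings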