def getEntity(entityPath, name, namespace=None, owner=None, sessionKey=None, hostPath=None, available=True):
    conn = en.getEntity(entityPath, name, namespace=namespace, owner=owner, sessionKey=sessionKey, hostPath=hostPath)
    params = conn.properties
    if available:
        for n, v in params.items():
            if n in ["disabled"] and util.normalizeBoolean(v):
                raise ResourceNotFound(msg="%s Resource is disabled." % name)
            elif n.endswith(".disabled"):
                if v in ["-1"]:
                    raise ResourceNotFound(msg="%s in %s Resource not found." % (n[:len(n) - 9], name))
                elif util.normalizeBoolean(v):
                    raise ResourceNotFound(msg="%s in %s Resource is disabled." % (n[:len(n) - 9], name))
    return conn
def _setup_smtp(self, payload):
    """ Setup smtp to send out a group of emails. """
    namespace = payload.get('namespace', 'splunk_app_infrastructure')
    sessionKey = payload.get('session_key')
    self.ssContent = self.ssContent if self.ssContent else self.getAlertActions(sessionKey, namespace)
    use_ssl = normalizeBoolean(self.ssContent.get('use_ssl', False))
    use_tls = normalizeBoolean(self.ssContent.get('use_tls', False))
    server = self.ssContent.get('mailserver', 'localhost')
    username = self.ssContent.get('auth_username', '')
    password = self.ssContent.get('clear_password', '')

    # setup the Open SSL Context
    sslHelper = ssl_context.SSLHelper()
    serverConfJSON = sslHelper.getServerSettings(sessionKey)
    # Pass in settings from alert_actions.conf into context
    ctx = sslHelper.createSSLContextFromSettings(
        sslConfJSON=self.ssContent,
        serverConfJSON=serverConfJSON,
        isClientContext=True)

    # send the mail
    if not use_ssl:
        smtp = secure_smtplib.SecureSMTP(host=server)
    else:
        smtp = secure_smtplib.SecureSMTP_SSL(host=server, sslContext=ctx)

    # smtp.set_debuglevel(1)
    if use_tls:
        smtp.starttls(ctx)
    if len(username) > 0 and len(password) > 0:
        smtp.login(username, password)

    return smtp
def _renderTable(self, pdfRenderer, view):
    """ render a table of results """
    # get options
    options = view.getOptions()
    if "show" in options and not normalizeBoolean(options['show']):
        return

    fieldFormats = view.getFieldFormats()

    displayRowNumbers = False
    if "displayRowNumbers" in options:
        displayRowNumbers = normalizeBoolean(options['displayRowNumbers'])

    overlay = None
    if "dataOverlayMode" in options:
        overlay = options['dataOverlayMode']
    if "overlay" in options:
        overlay = options['overlay']

    resultsExist = False

    # get results object
    view.getSearchJobObj().setFetchOptions(output_mode="json_cols", time_format=pt.TIME_RAW_FORMAT)
    results = view.getSearchJobResults()

    # determine field set
    explicitFieldList = view.getSearchFieldList()
    fieldOrder = []
    if len(explicitFieldList) > 0:
        for field in explicitFieldList:
            if field in results.fieldOrder and field not in fieldOrder:
                fieldOrder.append(field)
        if len(fieldOrder) == 0:
            logger.warning("%s: results.fieldOrder does not contain any explicitly specified fields: %s" % (view.getTitle(), explicitFieldList))
            return
    else:
        fieldOrder = self._renderTable_restrictFields(results.fieldOrder)

    if len(fieldOrder) == 0:
        pdfRenderer.renderText("No results found.")
        return

    tableData = pt.TableData(columnNames=fieldOrder)

    for i, result in enumerate(results):
        if i >= self._maxRowsPerTable:
            break
        if (i > 0) and ((i % 100) == 0):
            self._keepAllSearchesAlive()
        resultsExist = True
        tableData.addRowFromSearchResult(result)

    if resultsExist:
        pdfRenderer.renderTable(tableData, title=view.getSubtitle() or view.getTitle(), displayLineNumbers=displayRowNumbers, fieldFormats=fieldFormats, overlayMode=overlay)
    else:
        logger.warning("PDFGenHandler::_renderTable> no results for table")
        pdfRenderer.renderText("No results found.")
def send_mail_screenshot(settings, payload, session_key, file_type):
    ''' Setup connection, attach file to mail and send out mail '''
    # Get Mail object
    email = build_email_object(settings, payload)
    email.attach(build_mime_attachment(file_type))

    sender = email['From']
    use_ssl = normalizeBoolean(settings.get('use_ssl', False))
    use_tls = normalizeBoolean(settings.get('use_tls', False))
    server = settings.get('mailserver', 'localhost')
    username = settings.get('auth_username', '')
    password = settings.get('clear_password', '')

    recipients = []
    if email['To']:
        recipients.extend(EMAIL_DELIM.split(email['To']))
    # Clear leading / trailing whitespace from recipients
    recipients = [r.strip() for r in recipients]

    mail_log_msg = 'Sending email. subject="%s", recipients="%s", server="%s"' % (
        email['subject'],
        str(recipients),
        str(server)
    )

    try:
        # make sure the sender is a valid email address
        if sender.find("@") == -1:
            sender = sender + '@' + socket.gethostname()
        if sender.endswith("@"):
            sender = sender + 'localhost'

        # setup the Open SSL Context
        sslHelper = ssl_context.SSLHelper()
        serverConfJSON = sslHelper.getServerSettings(session_key)
        # Pass in settings from alert_actions.conf into context
        # Version 6.6
        try:
            ctx = sslHelper.createSSLContextFromSettings(
                sslConfJSON=settings,  # TODO: Check for error because this must be commented to work on customer site
                serverConfJSON=serverConfJSON,
                isClientContext=True)
        except Exception, e:
            print >> sys.stderr, "WARN Setting up SSL context with Splunk > 6.5.x version not possible: %s" % e
            try:
                # Version 6.4
                ctx = sslHelper.createSSLContextFromSettings(
                    confJSON=settings,
                    sessionKey=session_key,
                    isClientContext=True)
            except Exception, e:
                print >> sys.stderr, "WARN Setting up SSL context with Splunk < 6.6.x version not possible: %s" % e
                raise
def buildAttachments(settings, ssContent, results, email, jobCount):
    ssContent['errorArray'] = []
    sendpdf = normalizeBoolean(ssContent.get('action.email.sendpdf', False))
    sendcsv = normalizeBoolean(ssContent.get('action.email.sendcsv', False))
    sendresults = normalizeBoolean(ssContent.get('action.email.sendresults', False))
    inline = normalizeBoolean(ssContent.get('action.email.inline', False))
    inlineFormat = ssContent.get('action.email.format')
    type = ssContent['type']
    namespace = settings['namespace']
    owner = settings['owner']
    sessionKey = settings['sessionKey']
    searchid = settings.get('sid')
    pdfview = ssContent.get('action.email.pdfview', '')
    subject = ssContent.get("action.email.subject")
    ssName = ssContent.get("name")
    server = ssContent.get('action.email.mailserver', 'localhost')
    results_link = ssContent.get('results_link')
    paperSize = ssContent.get('action.email.reportPaperSize', 'letter')
    paperOrientation = ssContent.get('action.email.reportPaperOrientation', 'portrait')
    pdfService = None
    pdf = None

    if sendpdf:
        import splunk.pdf.availability as pdf_availability
        pdfService = pdf_availability.which_pdf_service(sessionKey=sessionKey, viewId=pdfview, namespace=namespace, owner=owner)
        logger.info("sendemail pdfService = %s" % pdfService)

        try:
            if pdfService == "pdfgen":
                # will raise an Exception on error
                pdf = generatePDF(server, subject, searchid, settings, pdfview, ssName, paperSize, paperOrientation)
            elif pdfService == "deprecated":
                # will raise an Exception on error
                pdf = generatePDF_deprecated(results_link, subject, searchid, settings, pdfview, paperSize, paperOrientation)
        except Exception, e:
            logger.error("An error occurred while generating a PDF: %s" % e)
            ssContent['errorArray'].append("An error occurred while generating the PDF. Please see python.log for details.")

        if pdf:
            email.attach(pdf)
def __init__(self, *args, **kwargs):
    self.cron_schedule = kwargs.get('cron_schedule', None)
    self.realtime_schedule = util.normalizeBoolean(kwargs.get('realtime_schedule', None))
    self.default_owner = kwargs.get('default_owner', None)
    self.default_status = kwargs.get('default_status', None)
    self.description = kwargs.get('description', None)
    self.domain = kwargs.get('domain', CorrelationSearch.DEFAULT_SECURITY_DOMAIN)
    self.drilldown_name = kwargs.get('drilldown_name', None)
    self.drilldown_search = kwargs.get('drilldown_search', None)
    self.drilldown_earliest_offset = kwargs.get('drilldown_earliest_offset', None)
    self.drilldown_latest_offset = kwargs.get('drilldown_latest_offset', None)
    self.end_time = kwargs.get('end_time', None)
    self.enabled = kwargs.get('enabled', True)
    self.name = kwargs.get('name', None)
    self.namespace = kwargs.get('namespace', CorrelationSearch.DEFAULT_NAMESPACE)
    self.owner = kwargs.get('owner', CorrelationSearch.DEFAULT_OWNER)
    self.rule_description = kwargs.get('rule_description', None)
    self.rule_title = kwargs.get('rule_title', None)
    self.search = kwargs.get('search', None)
    self.search_spec = kwargs.get('search_spec', None)
    self.severity = Severity.from_readable_severity(kwargs.get('severity', "unknown"))
    self.sid = kwargs.get('sid', None)
    self.start_time = kwargs.get('start_time', None)
    self.gs_service_id = kwargs.get('gs_service_id', None)
    self.next_steps = kwargs.get('next_steps', None)
    self.recommended_actions = kwargs.get('recommended_actions', None)

    if self.sid is not None:
        # This may be an existing search. Namespace and owner get loaded in get_rest_info
        # instead of here, since we need to have the data for static methods as well.
        self.namespace = None
        self.owner = None

    # Throttling parameters apply to ALL alert actions.
    # Note: aggregate_duration is a Splunk time specifier, so we force the conversion.
    self.aggregate_duration = str(kwargs.get('aggregate_duration', ''))
    self.group_by = kwargs.get('group_by', None)

    # Summary index alert action parameters.
    # Default action is to create notable event.
    self.notable_action_enabled = util.normalizeBoolean(kwargs.get('action.notable', True))

    # modular alert settings
    self.alert_actions = {
        k: v
        for (k, v) in kwargs.iteritems() if k.startswith('action.')
    }
def decorator(self, *args, **kwargs):
    sessionKey = cherrypy.session.get('sessionKey')
    en = entity.getEntity('services/server/info', 'server-info', sessionKey=sessionKey)
    kwargs.update({
        'build': int(en['build']),
        'isFree': util.normalizeBoolean(en['isFree']),
        'isTrial': util.normalizeBoolean(en['isTrial']),
        'version': en['version']
    })
    return fn(self, *args, **kwargs)
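# Illustrative sketch (not from the original source): the decorator above closes over a
# handler `fn` and injects normalized server-info fields, so a wrapped handler can simply
# declare them as keyword arguments. The decorator-factory name below is hypothetical.
#
#   @with_server_info                       # hypothetical wrapper producing `decorator`
#   def render(self, *args, **kwargs):
#       if kwargs['isFree'] or kwargs['isTrial']:
#           pass  # e.g. hide enterprise-only UI
#       return '%s build %s' % (kwargs['version'], kwargs['build'])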
def mail(email, argvals, ssContent):
    sender = email['From']
    use_ssl = normalizeBoolean(ssContent.get('action.email.use_ssl', False))
    use_tls = normalizeBoolean(ssContent.get('action.email.use_tls', False))
    server = ssContent.get('action.email.mailserver', 'localhost')
    username = argvals.get('username', '')
    password = argvals.get('password', '')

    recipients = []
    if email['To']:
        recipients.extend(EMAIL_DELIM.split(email['To']))
    if email['Cc']:
        recipients.extend(EMAIL_DELIM.split(email['Cc']))
    if email['Bcc']:
        recipients.extend(EMAIL_DELIM.split(email['Bcc']))
    # Clear leading / trailing whitespace from recipients
    recipients = [r.strip() for r in recipients]

    mail_log_msg = 'Sending email. subject="%s", results_link="%s", recipients="%s", server="%s"' % (
        ssContent.get('action.email.subject'),
        ssContent.get('results_link'),
        str(recipients),
        str(server)
    )

    try:
        # make sure the sender is a valid email address
        if sender.find("@") == -1:
            sender = sender + '@' + socket.gethostname()
        if sender.endswith("@"):
            sender = sender + 'localhost'

        # send the mail
        if not use_ssl:
            smtp = smtplib.SMTP(server)
        else:
            smtp = smtplib.SMTP_SSL(server)

        if use_tls:
            smtp.starttls()
        if len(username) > 0 and len(password) > 0:
            smtp.login(username, password)

        # logger.info('email = %s', email.as_string())
        smtp.sendmail(sender, recipients, email.as_string())
        smtp.quit()
        logger.info(mail_log_msg)
    except Exception, e:
        logger.error(mail_log_msg)
        raise
def mail(email, argvals, ssContent):
    sender = email['From']
    use_ssl = normalizeBoolean(ssContent.get('action.email.use_ssl', False))
    use_tls = normalizeBoolean(ssContent.get('action.email.use_tls', False))
    server = ssContent.get('action.email.mailserver', 'localhost')
    username = argvals.get('username', '')
    password = argvals.get('password', '')

    recipients = []
    if email['To']:
        recipients.extend(EMAIL_DELIM.split(email['To']))
    if email['Cc']:
        recipients.extend(EMAIL_DELIM.split(email['Cc']))
    if email['Bcc']:
        recipients.extend(EMAIL_DELIM.split(email['Bcc']))
    # Clear leading / trailing whitespace from recipients
    recipients = [r.strip() for r in recipients]

    mail_log_msg = 'Sending email. subject="%s", results_link="%s", recipients="%s"' % (
        ssContent.get('action.email.subject'),
        ssContent.get('results_link'),
        str(recipients))

    try:
        # make sure the sender is a valid email address
        if sender.find("@") == -1:
            sender = sender + '@' + socket.gethostname()
        if sender.endswith("@"):
            sender = sender + 'localhost'

        # send the mail
        if not use_ssl:
            smtp = smtplib.SMTP(server)
        else:
            smtp = smtplib.SMTP_SSL(server)

        if use_tls:
            smtp.starttls()
        if len(username) > 0 and len(password) > 0:
            smtp.login(username, password)

        # logger.info('email = %s', email.as_string())
        smtp.sendmail(sender, recipients, email.as_string())
        smtp.quit()
        logger.info(mail_log_msg)
    except Exception, e:
        logger.error(mail_log_msg)
        raise
def sync_macros(self):
    """Summary
    Sync inputs for update macros based on custom indexes.
    """
    logger.info('syncing inputs...')
    # get the snapshot of current inputs from summary index
    inputs_spl = util.get_option_from_conf(self.session_key, 'macros', 'aws-sourcetype-index-summary', 'definition')
    input_list = splunk_search.searchAll(inputs_spl, sessionKey=self.session_key)
    logger.info('%s input(s) in total' % len(input_list))
    for input in input_list:
        index_name = input.get('input_index')[0].value
        sourcetype = input.get('input_sourcetype')[0].value
        # update macros
        if sourcetype in SOURCETYPE_MACRO_MAP:
            macro_stanza = SOURCETYPE_MACRO_MAP[sourcetype]
            util.update_index_macro(self.session_key, macro_stanza, index_name)

    # enable savedsearches
    saved_searches = self.local_service.saved_searches
    for search_name in SCHEDULE_SEARCHES:
        if search_name in saved_searches:
            search = saved_searches[search_name]
            enabled = splunk_util.normalizeBoolean(search.content['is_scheduled'])
            if not enabled:
                search.update(**{'is_scheduled': 1})

    return 'Macros Update Complete.'
def _initAlertActionsDefaults(self):
    """ use alertActions entity to determine default papersize
        return in form of "<size>" or "<size>-landscape"
    """
    paperSize = DEFAULT_PAPER_SIZE
    paperOrientation = DEFAULT_PAPER_ORIENTATION
    try:
        # SPL-107168 Passing namespace and owner to generate context specific endpoint
        settings = entity.getEntity(self.ALERT_ACTIONS_ENTITY, 'email', namespace=self._namespace, owner=self._owner, sessionKey=self.sessionKey)
        # paperSize is 'letter', 'legal', 'A4', etc
        paperSize = settings.get('reportPaperSize') or DEFAULT_PAPER_SIZE
        # paperOrientation is 'portrait' or 'landscape'
        paperOrientation = settings.get('reportPaperOrientation') or DEFAULT_PAPER_ORIENTATION
        self._includeSplunkLogo = normalizeBoolean(settings.get('reportIncludeSplunkLogo', self._includeSplunkLogo))
        cidFontListString = settings.get('reportCIDFontList', '') or ''
        self._cidFontList = cidFontListString.split(' ')
        self._fileNamePattern = settings.get('reportFileName')

        # retrieve pdf settings
        for k, v in settings.iteritems():
            if k.startswith(pdfrenderer.SETTING_PREFIX):
                self._pdfSettings[k] = v
                keyNoPrefix = k[len(pdfrenderer.SETTING_PREFIX):len(k)]
                # SSL settings are stored in pdf namespace, but the ssl_context
                # won't find these settings if they are prefixed w/ pdf
                self._requestSettings['pdf'][keyNoPrefix] = v
    except Exception as e:
        logger.error("Could not access or parse email stanza of alert_actions.conf. Error=%s" % str(e))

    if paperOrientation == 'landscape':
        self._paperSize = paperSize + '-landscape'
    else:
        self._paperSize = paperSize
def __init__(self, scheme_args, args=None, this_logger=None):
    """
    Set up the modular input.

    Arguments:
    scheme_args -- The title (e.g. "Database Connector"), description of the input (e.g. "Get data from a database"), etc.
    args -- A list of Field instances for validating the arguments
    this_logger - A logger instance (defaults to None)
    """
    # default to global logger (python_modular_input)
    self.logger = this_logger if this_logger is not None else logger

    # Set the scheme arguments.
    for arg in ['title', 'description', 'use_external_validation', 'streaming_mode', 'use_single_instance']:
        setattr(self, arg, self._is_valid_param(arg, scheme_args.get(arg)))

    for arg in ['always_run', 'requires_kvstore']:
        if arg in scheme_args and normalizeBoolean(scheme_args[arg]) is True:
            setattr(self, arg, True)
        else:
            setattr(self, arg, False)

    for arg in ['kvstore_wait_time']:
        try:
            setattr(self, arg, int(scheme_args[arg]))
        except Exception:
            setattr(self, arg, 0)

    self.args = [] if args is None else args[:]
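# Minimal sketch of how the constructor above might be called; the scheme keys mirror the
# ones it reads, everything else here is made up for illustration.
#
#   example_scheme_args = {
#       'title': 'Database Connector',
#       'description': 'Get data from a database',
#       'use_external_validation': 'true',
#       'streaming_mode': 'xml',
#       'use_single_instance': 'true',
#       'always_run': '0',          # normalizeBoolean('0') is not True -> attribute set to False
#       'requires_kvstore': '1',    # normalizeBoolean('1') is True -> attribute set to True
#       'kvstore_wait_time': '30',
#   }
#   modular_input = ExampleModularInput(example_scheme_args, args=[])   # hypothetical subclass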
def _initAlertActionsDefaults(self):
    """ use alertActions entity to determine default papersize
        return in form of "<size>" or "<size>-landscape"
    """
    paperSize = DEFAULT_PAPER_SIZE
    paperOrientation = DEFAULT_PAPER_ORIENTATION
    try:
        settings = entity.getEntity(self.ALERT_ACTIONS_ENTITY, 'email', sessionKey=self.sessionKey)
        # paperSize is 'letter', 'legal', 'A4', etc
        paperSize = settings.get('reportPaperSize') or DEFAULT_PAPER_SIZE
        # paperOrientation is 'portrait' or 'landscape'
        paperOrientation = settings.get('reportPaperOrientation') or DEFAULT_PAPER_ORIENTATION
        self._includeSplunkLogo = normalizeBoolean(settings.get('reportIncludeSplunkLogo', self._includeSplunkLogo))
        cidFontListString = settings.get('reportCIDFontList', '') or ''
        self._cidFontList = cidFontListString.split(' ')
    except Exception as e:
        logger.error("Could not access or parse email stanza of alert_actions.conf. Error=%s" % str(e))

    if paperOrientation == 'landscape':
        self._paperSize = paperSize + '-landscape'
    else:
        self._paperSize = paperSize
def index(self, **kwargs):
    '''
    Displays a tree view

    Arguments:
    eai_path - (Optional) Path of the source endpoint (ex: admin/win-ad-explorer); (Required if using eai_proxy)
    proxy_path - (Optional) Path of the proxy endpoint, by default tree/eai_proxy translating xml response into json.
    count - (Optional) Maximum number of nodes requested per click.
    selected_text - (Optional) Text in the bottom preceding the selected path.
    '''
    errors = []
    proxy_path = ''
    selected_text = ''

    if not 'proxy_path' in kwargs and not 'eai_path' in kwargs:
        errors.append(_('No source specified for the treeview.'))
    else:
        proxy_path = kwargs.pop('proxy_path', 'tree/eai_proxy')
        selected_text = kwargs.pop('selected_text', _('Selected path: '))

    # cleanse input
    for key in kwargs:
        kwargs[key] = urllib.quote(kwargs[key].strip())

    templateArgs = {'proxy_path': proxy_path,
                    'selected_text': selected_text,
                    'start_node': kwargs.pop('start_node', '').strip(),
                    'separate_children': util.normalizeBoolean(kwargs.pop('separate_children', False)),
                    'data_args': kwargs,
                    'errors': errors
                    }
    return self.render_template('/view/tree.html', templateArgs)
def getJobCount(jobContent):
    if jobContent.get('statusBuckets') == 0 or (
            normalizeBoolean(jobContent.get('reportSearch')) and not re.match('sendemail', jobContent.get('reportSearch'))):
        return jobContent.get('resultCount')
    else:
        return jobContent.get('eventCount')
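# Example of the branch logic above, with made-up job dictionaries: a job with
# statusBuckets == 0, or with a reportSearch that is not a sendemail post-process, is
# treated as transforming and reports resultCount; otherwise eventCount is used.
#
#   getJobCount({'statusBuckets': 0, 'reportSearch': None,
#                'resultCount': 12, 'eventCount': 340})    # -> 12
#   getJobCount({'statusBuckets': 300, 'reportSearch': None,
#                'resultCount': 12, 'eventCount': 340})    # -> 340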
def normalize_verify(self, verify):
    normalized = normalizeBoolean(verify)
    if normalized in [True, False]:
        return normalized
    if normalized is None or normalized.strip() == '':
        return False
    return self.normalize_cert_path(verify)
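# Rough behaviour sketch of normalize_verify, assuming splunk.util.normalizeBoolean
# semantics (true-ish/false-ish strings become booleans, anything else is returned as-is):
#
#   normalize_verify('true')               # -> True  (verify TLS with the default CA bundle)
#   normalize_verify('0')                  # -> False (verification disabled)
#   normalize_verify('')                   # -> False (empty value treated as disabled)
#   normalize_verify('/opt/certs/ca.pem')  # -> delegated to self.normalize_cert_path(...)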
def _renderEvents(self, pdfRenderer, view):
    """ render a listing of events """
    # get options -- TODO: should be refactored with _renderTable (and beyond: should have a generalized system for getting options)
    options = view.getOptions()
    displayRowNumbers = False
    if "displayRowNumbers" in options:
        displayRowNumbers = normalizeBoolean(options['displayRowNumbers'])

    data = []

    # fix header for events
    header = ['Time', 'Event']

    events = view.getSearchJobEvents()
    for i, event in enumerate(events):
        if i >= self._maxRowsPerTable:
            break
        if (i > 0) and ((i % 100) == 0):
            self._keepAllSearchesAlive()
        data.append([str(event.time), event.raw])

    if len(data) == 0:
        pdfRenderer.renderText("No matching events found.")
    else:
        tableData = pt.TableData(columnNames=header, data=data)
        pdfRenderer.renderTable(tableData, title=view.getTitle(), columnVAlignments=['TOP', 'TOP'], displayLineNumbers=displayRowNumbers)
def get_local_alert_actions(self, session_key):
    getargs = {
        'output_mode': 'json',
        'search': 'is_custom=1 AND payload_format=json',
        'count': 0
    }
    try:
        unused_r, c = simpleRequest('alerts/alert_actions', getargs=getargs, sessionKey=session_key, raiseAllErrors=True)
    except Exception as e:
        self.logger.error("Failed to get local alert actions: %s", e)
        return {}

    local_alert_actions = {}
    for action in json.loads(c)['entry']:
        # check if the action supports worker, if yes, add to the return dictionary
        name = action['name']
        content = action['content']
        try:
            _cam = json.loads(content['param._cam'])
            if normalizeBoolean(_cam['supports_workers']):
                local_alert_actions[name] = action
        except Exception:
            continue
    return local_alert_actions
def _renderView(self, pdfRenderer, view, lastView=False):
    """ render an individual panel """
    types = view.getRenderTypes()
    pdfRenderer.conditionalPageBreak(types)

    # always render header first
    self._renderViewHeader(pdfRenderer, view)

    if view.hasError():
        pdfRenderer.renderText(view.getError())
        return

    if view.requiresSearchJobObj():
        if view.hasSearchError():
            pdfRenderer.renderText(view.getSearchError())
            return
        while not view.isSearchComplete() and not view.isRealtime():
            time.sleep(self.POLLING_WAIT_TIME)
            self._keepAllSearchesAlive()
            self._checkForTimeout()

    if 'trellis.enabled' in view.getOptions() and normalizeBoolean(view.getOptions()['trellis.enabled']):
        pdfRenderer.renderText("PDF export is not available for visualizations using trellis layout.")
    else:
        try:
            for type in types:
                if type == 'chart':
                    self._renderChart(pdfRenderer, view)
                elif type == 'map':
                    self._renderMap(pdfRenderer, view)
                elif type == 'table':
                    self._renderTable(pdfRenderer, view)
                elif type == 'event':
                    self._renderEvents(pdfRenderer, view)
                elif type == 'single':
                    self._renderSingle(pdfRenderer, view)
                elif type == 'list':
                    self._renderList(pdfRenderer, view)
                elif type == 'html':
                    self._renderHtml(pdfRenderer, view)
                elif type == 'viz':
                    pdfRenderer.renderText("PDF export does not support custom visualizations.")
                else:
                    pdfRenderer.renderText("No render option for type '%s'" % type)
                    logger.warning("PDFGenHandler::_renderView> No render option for type = '%s'" % type)
        except Exception as e:
            content = str(e)
            pu.logErrorAndTrace(e)
            pdfRenderer.renderText(content)

    if not lastView:
        pdfRenderer.spaceBetween()
def _renderEvents(self, pdfRenderer, view):
    """ render a listing of events """
    # get options -- TODO: should be refactored with _renderTable (and beyond: should have a generalized system for getting options)
    options = view.getOptions()
    displayRowNumbers = False
    if "displayRowNumbers" in options:
        displayRowNumbers = normalizeBoolean(options['displayRowNumbers'])

    data = []
    events = view.getSearchJobEvents()
    for i, event in enumerate(events):
        if i >= self._maxRowsPerTable:
            break
        if (i > 0) and ((i % 100) == 0):
            self._keepAllSearchesAlive()
        data.append([str(event.time), event.raw])

    if len(data) == 0:
        pdfRenderer.renderText("No matching events found.")
    else:
        # only set column widths for the line number and timestamp columns, setting None
        # for the last column provides for automatically setting its width
        colWidths = [pdfRenderer.ONE_INCH * 1.3, None]
        # we want to hard wrap the raw event column only
        colHardWraps = [False, True]
        pdfRenderer.renderTable(data, columnWidths=colWidths, columnHardWraps=colHardWraps, columnVAlignments=['TOP', 'TOP'], displayLineNumbers=displayRowNumbers)
def _initWebDefaults(self):
    defaultSplunkdConnectionTimeout = 30
    try:
        # SPL-107168 Passing namespace and owner to generate context specific endpoint
        settings = entity.getEntity(self.WEB_ENTITY, 'settings', namespace=self._namespace, owner=self._owner, sessionKey=self.sessionKey)
        self._enableInsecurePdfgen = normalizeBoolean(settings.get('enable_insecure_pdfgen', self._enableInsecurePdfgen))
        splunkdConnectionTimeout = int(settings.get('splunkdConnectionTimeout', defaultSplunkdConnectionTimeout))
        if splunkdConnectionTimeout < defaultSplunkdConnectionTimeout:
            splunkdConnectionTimeout = defaultSplunkdConnectionTimeout
        splunk.rest.SPLUNKD_CONNECTION_TIMEOUT = splunkdConnectionTimeout
    except Exception as e:
        logger.error("Could not access or parse settings stanza of web.conf. Error=%s" % e)
        splunk.rest.SPLUNKD_CONNECTION_TIMEOUT = defaultSplunkdConnectionTimeout
    finally:
        logger.info("splunkdConnectionTimeout=%s" % splunk.rest.SPLUNKD_CONNECTION_TIMEOUT)
def build_response(self, entities, selectionMode=None):
    output = jsonresponse.JsonResponse()
    output.count = entities.itemsPerPage
    output.offset = entities.offset
    output.total = entities.totalResults
    output.data = []

    if entities:
        if len(entities) == 1 and not entities.values()[0].get('name'):
            # empty node
            return output
        blocks = []
        for ent in entities.values():
            hasChildren = util.normalizeBoolean(ent.get('hasSubNodes', True))
            block = {'text': ent.get('name', ent.name),
                     'fileSize': ent.get('fileSize'),
                     'id': ent.name,
                     'hasChildren': hasChildren,
                     'classes': 'nonleaf' if hasChildren else 'leaf',
                     'selectable': self.is_selectable(ent, selectionMode)
                     }
            blocks.append(block)
        output.data = sorted(blocks, key=lambda block: block['text'].lower())
    return output
def getEntities(entityPath, namespace=None, owner=None, sessionKey=None, count=None, offset=0, hostPath=None, available=True):
    conns = en.getEntities(entityPath, namespace=namespace, owner=owner, count=count, offset=offset, sessionKey=sessionKey, hostPath=hostPath)
    for name in conns:
        params = conns[name].properties
        if available:
            for n, v in params.items():
                if n in ["disabled"] or n.endswith(".disabled"):
                    if v in ["-1"] or util.normalizeBoolean(v):
                        del conns[name]
    return conns
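# Hypothetical usage of the wrapper above: fetch entities from an EAI endpoint and drop
# anything whose "disabled" (or "*.disabled") property is set. The endpoint path and
# argument values are illustrative only.
#
#   conns = getEntities('some/eai/endpoint', namespace='search', owner='admin',
#                       sessionKey=sessionKey, count=0)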
def plainTableTemplate(results, ssContent):
    if len(results) > 0:
        width_sort_columns = normalizeBoolean(ssContent.get('action.email.width_sort_columns', True))
        columnMaxLens = getSortedColumns(results, width_sort_columns)
        text = ""
        space = " " * 4

        # output column names
        for col, maxlen in columnMaxLens:
            val = col
            padsize = maxlen - len(val)
            text += val + ' ' * padsize + space
        text += "\n" + "-" * len(text) + "\n"

        # output each result's values
        for result in results:
            for col, maxlen in columnMaxLens:
                val = result.get(col, "")
                padsize = maxlen - len(val)
                # left justify ALL the columns
                text += val + ' ' * padsize + space
            text += "\n"
    else:
        text = "No results found."
    return text
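# Approximate illustration of the plain-text layout produced above, with made-up results
# and assuming getSortedColumns returns [('host', 6), ('count', 5)]; each cell is
# left-justified to the column's maximum width plus a four-space gutter, and the dashed
# rule matches the header line length.
#
#   host      count
#   ---------------
#   web-01    12
#   web-02    7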
def _renderEvents(self, pdfRenderer, view):
    """ render a listing of events """
    # get options -- TODO: should be refactored with _renderTable (and beyond: should have a generalized system for getting options)
    options = view.getOptions()
    displayRowNumbers = False
    if "displayRowNumbers" in options:
        displayRowNumbers = normalizeBoolean(options['displayRowNumbers'])

    data = []
    events = view.getSearchJobEvents()
    for i, event in enumerate(events):
        if i >= self._maxRowsPerTable:
            break
        if (i > 0) and ((i % 100) == 0):
            self._keepAllSearchesAlive()
        data.append([str(event.time), event.raw])

    if len(data) == 0:
        pdfRenderer.renderText("No matching events found.")
    else:
        # only set column widths for the line number and timestamp columns, setting None
        # for the last column provides for automatically setting its width
        colWidths = [pdfRenderer.ONE_INCH * 1.3, None]
        # we want to hard wrap the raw event column only
        colHardWraps = [False, True]
        pdfRenderer.renderTable(data, columnWidths=colWidths, columnHardWraps=colHardWraps, columnVAlignments=['TOP', 'TOP'], displayLineNumbers=displayRowNumbers)
def load_savedsearches_conf(self, session_key=None, namespace=None, owner=None):
    """
    Configures the given saved search with the parameters loaded from the related savedsearches.conf
    """
    # Refresh savedsearches.conf
    CorrelationSearch.__refresh_savedsearches__(session_key)

    # Get the saved search info
    saved_search = en.getEntity(CorrelationSearch.SAVED_SEARCHES_REST_URL, self.sid, namespace=namespace, owner=owner, sessionKey=session_key)

    self.enabled = CorrelationSearch.is_search_enabled(saved_search)
    self.start_time = saved_search.get('dispatch.earliest_time')
    self.end_time = saved_search.get('dispatch.latest_time')
    self.search = saved_search.get('search')
    self.cron_schedule = saved_search.get('cron_schedule')
    self.realtime_schedule = util.normalizeBoolean(saved_search.get('realtime_schedule', None))

    # Load summary index alert action parameters.
    self.notable_action_enabled = util.normalizeBoolean(saved_search.get('action.notable', None))

    # Load alerts
    self.alert_actions = {
        k: v
        for (k, v) in saved_search.iteritems() if k.startswith('action.')
    }

    # Load app and owner context
    self.namespace = saved_search.get('eai:acl').get('app')
    self.owner = saved_search.get('eai:acl').get('owner')

    # Load aggregation parameters
    self.group_by = CorrelationSearch.getGroupByAsList(saved_search.get('alert.suppress.fields', None))

    # Set the aggregation to an empty string by default which indicates that no throttling is to be done
    self.aggregate_duration = Duration.duration_from_readable(saved_search.get('alert.suppress.period', ''))
def buildAttachments(settings, ssContent, results, email, jobCount):
    ssContent['errorArray'] = []
    sendpdf = normalizeBoolean(ssContent.get('action.email.sendpdf', False))
    sendcsv = normalizeBoolean(ssContent.get('action.email.sendcsv', False))
    sendresults = normalizeBoolean(ssContent.get('action.email.sendresults', False))
    inline = normalizeBoolean(ssContent.get('action.email.inline', False))
    inlineFormat = ssContent.get('action.email.format')
    type = ssContent['type']
    namespace = settings['namespace']
    owner = settings['owner']
    sessionKey = settings['sessionKey']
    searchid = settings.get('sid')
    pdfview = ssContent.get('action.email.pdfview', '')
    subject = ssContent.get("action.email.subject")
    ssName = ssContent.get("name")
    server = ssContent.get('action.email.mailserver', 'localhost')
    results_link = ssContent.get('results_link')
    paperSize = ssContent.get('action.email.reportPaperSize', 'letter')
    paperOrientation = ssContent.get('action.email.reportPaperOrientation', 'portrait')
    pdfService = None
    pdf = None

    if sendpdf:
        import splunk.pdf.availability as pdf_availability
        pdfService = pdf_availability.which_pdf_service(sessionKey=sessionKey, viewId=pdfview, namespace=namespace, owner=owner)
        logger.info("sendemail pdfService = %s" % pdfService)

        try:
            if pdfService == "pdfgen":
                # will raise an Exception on error
                pdf = generatePDF(server, subject, searchid, settings, pdfview, ssName, paperSize, paperOrientation)
            elif pdfService == "deprecated":
                # will raise an Exception on error
                pdf = generatePDF_deprecated(results_link, subject, searchid, settings, pdfview, paperSize, paperOrientation)
        except Exception, e:
            logger.error("An error occurred while generating a PDF: %s" % e)
            ssContent['errorArray'].append("An error occurred while generating the PDF. Please see python.log for details.")

        if pdf:
            email.attach(pdf)
def _getPanelDictFromXmlElem(panelElem):
    """ convert an XML representation of a panel into a dict representation
        the dict representation should be identical to what is retrieved from
        legacy models models/dashboard_panel
    """
    panelDict = {}
    optionsDict = {}
    panelDict["type"] = panelElem.tag
    panelDict["title"] = ""

    if panelElem.tag == "html":
        html_content = None
        if normalizeBoolean(panelElem.attrib.get('encoded', False)):
            from lxml.html import fromstring, tostring
            html_content = tostring(fromstring(panelElem.text), method='xml')
        else:
            html_content = et.tostring(panelElem)
        optionsDict[pu.PP_RAW_CONTENT] = html_content
        titleElem = panelElem.find("title")
        if titleElem:
            panelDict["title"] = titleElem.text
    else:
        panelDict["searchLatestTime"] = ""
        panelDict["searchEarliestTime"] = ""
        for elem in panelElem:
            if elem.text:
                strippedText = elem.text.strip(" \n\t\r")
            else:
                strippedText = ""

            if elem.tag == "option":
                optionsDict[elem.get('name')] = strippedText
            elif elem.tag == "searchString":
                panelDict["searchCommand"] = strippedText
                panelDict["searchMode"] = "string"
            elif elem.tag == "searchName":
                panelDict["searchCommand"] = strippedText
                panelDict["searchMode"] = "saved"
            elif elem.tag == "earliestTime":
                panelDict["searchEarliestTime"] = strippedText
            elif elem.tag == "latestTime":
                panelDict["searchLatestTime"] = strippedText
            elif elem.tag == "fields":
                if len(strippedText) and strippedText[0] == '[' and strippedText[-1] == ']':
                    fieldList = json.loads(strippedText)
                else:
                    fieldList = splunk.util.stringToFieldList(strippedText)
                panelDict["searchFieldList"] = [field.strip(" \n\t\r") for field in fieldList]
            else:
                panelDict[elem.tag] = strippedText

    if len(optionsDict) > 0:
        panelDict['options'] = optionsDict

    logger.debug("dashboard-xml panel xml=%s dict=%s" % (et.tostring(panelElem, pretty_print=True), panelDict))
    return panelDict
def _setup_smtp(self, payload):
    """ Setup smtp to send out a group of emails. """
    use_ssl = normalizeBoolean(self.ssContent.get('use_ssl', False))
    use_tls = normalizeBoolean(self.ssContent.get('use_tls', False))
    server = self.ssContent.get('mailserver', 'localhost')
    username = self.ssContent.get('auth_username', '')
    password = self.ssContent.get('clear_password', '')

    # setup the Open SSL Context
    sslHelper = ssl_context.SSLHelper()
    serverConfJSON = sslHelper.getServerSettings(self.sessionKey)
    # Pass in settings from alert_actions.conf into context
    ctx = sslHelper.createSSLContextFromSettings(
        sslConfJSON=self.ssContent,
        serverConfJSON=serverConfJSON,
        isClientContext=True)

    # send the mail
    if not use_ssl:
        smtp = secure_smtplib.SecureSMTP(host=server)
    else:
        smtp = secure_smtplib.SecureSMTP_SSL(host=server, sslContext=ctx)

    # smtp.set_debuglevel(1)
    if use_tls:
        smtp.starttls(ctx)

    if username and password and username.strip() and password.strip():
        try:
            smtp.login(username, password)
        except SMTPAuthenticationError as e:
            logger.error('Email server: fail to authenticate: %s' % e)
        except SMTPHeloError as e:
            logger.error('Email server: fail to reply to hello: %s' % e)
        except SMTPException as e:
            logger.error('Email server: fail to find suitable authentication method: %s' % e)
    else:
        logger.warning('Email server: using unauthenticated connection to SMTP server')

    return smtp
def splunkd_scheme(cls):
    if not cls.scheme:
        import splunk.clilib.cli_common as comm
        import splunk.util as splutil
        enableSsl = comm.getConfKeyValue('server', 'sslConfig', 'enableSplunkdSSL')
        enableSsl = splutil.normalizeBoolean(enableSsl)
        cls.scheme = 'https' if enableSsl else 'http'
    return cls.scheme
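# Hypothetical call site for the classmethod above: build a local splunkd URL whose scheme
# follows server.conf's enableSplunkdSSL setting. The class name and port are illustrative.
#
#   base_uri = '%s://127.0.0.1:8089' % SomeClient.splunkd_scheme()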
def normalizeBoolean(input, enableStrictMode=False, includeIntegers=True):
    bool_value = False
    if IN_SPLUNK_ENV:
        bool_value = splunk_util.normalizeBoolean(input, enableStrictMode, includeIntegers)
    else:
        bool_value = _normalizeBoolean(input, enableStrictMode, includeIntegers)
    return bool_value
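# Minimal sketch of what the non-Splunk fallback (_normalizeBoolean) could look like. This
# is an assumption modelled on splunk.util.normalizeBoolean's documented behaviour
# (true-ish/false-ish strings map to booleans, unrecognised values are returned unchanged
# unless strict mode is enabled), not the actual implementation.
def _normalizeBoolean_sketch(input, enableStrictMode=False, includeIntegers=True):
    true_values = ('1', 't', 'true', 'y', 'yes', 'on')
    false_values = ('0', 'f', 'false', 'n', 'no', 'off')
    if isinstance(input, bool):
        return input
    if includeIntegers and isinstance(input, int):
        # treat non-zero integers as True, zero as False (assumption)
        return input != 0
    if isinstance(input, basestring):
        value = input.strip().lower()
        if value in true_values:
            return True
        if value in false_values:
            return False
    if enableStrictMode:
        raise ValueError("Unable to cast value to boolean: %r" % (input,))
    # non-strict mode: hand back the original value untouched
    return input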
def is_selectable(self, ent, selectionMode):
    hasChildren = util.normalizeBoolean(ent.get('hasSubNodes', True))
    if selectionMode == 1:
        return hasChildren
    elif selectionMode == 2:
        return not hasChildren
    else:
        return True
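# Selection-mode semantics implied by the method above (illustrative calls, entity shown
# as a plain dict): mode 1 allows picking only container nodes, mode 2 only leaf nodes,
# and any other mode allows both.
#
#   is_selectable({'hasSubNodes': 'true'}, 1)    # -> True  (containers selectable)
#   is_selectable({'hasSubNodes': 'false'}, 1)   # -> False
#   is_selectable({'hasSubNodes': 'false'}, 2)   # -> True  (leaves selectable)
#   is_selectable({'hasSubNodes': 'true'}, None) # -> True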
def _enable_savedsearches(service, savedsearches):
    for search_name in savedsearches:
        if search_name in service.saved_searches:
            search = service.saved_searches[search_name]
            enabled = splunkutil.normalizeBoolean(search.content['is_scheduled'])
            if not enabled:
                search.update(**{'is_scheduled': 1})
    return
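# Hypothetical call site for the helper above, using the splunklib-style service object it
# expects; the search names are made up for illustration.
#
#   _enable_savedsearches(service, ['My Scheduled Search', 'Another Search'])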
def _parse(self, node):
    """
    Does the heavy lifting of converting the XML from splunkd
    to the object representation
    """
    vals = _traverseTree(node)

    # pop off rawargs before normalizing booleans so that 'head 1' doesn't become 'head true'
    if 'rawargs' in vals:
        raw = vals.pop('rawargs')
        util.normalizeBoolean(vals)
        vals['rawargs'] = raw
    else:
        vals = util.normalizeBoolean(vals)

    self.properties = vals

    if 'args' in self.properties.keys():
        # print "args:", self.properties['args']
        # get rid of some search filter silliness
        if isinstance(self.properties['args'], dict):
            if 'search' in self.properties['args'].keys():
                self.properties['args']['search'] = self.properties['args']['search']['clauses']
def generateResults(self, **kwargs):
    '''
    be careful to account for tricky conditions where some users
    can't interact with our custom REST endpoint by falling back to bundle
    '''
    app_name = kwargs.get('client_app', STATIC_APP)
    conf_name = 'unix'
    legacy_mode = False
    sessionKey = cherrypy.session.get('sessionKey')
    user = cherrypy.session['user']['name']

    if os.path.exists(LEGACY_SETUP):
        shutil.move(LEGACY_SETUP, LEGACY_SETUP + '.bak')
        logger.info('disabled legacy setup.xml for %s' % app_name)

    # if the current app doesn't exist...
    app = App.get(App.build_id(app_name, app_name, user))

    try:
        a = Unix.get(Unix.build_id(user, app_name, user))
    except:
        a = Unix(app_name, user, user)

    if kwargs.get('set_ignore'):
        try:
            a.has_ignored = True
            a.save()
        except:
            # assumption: 99% of exceptions here will be 403
            # we could version check, but this seems better
            to_set = {user: {'has_ignored': 1}}
            self.setConf(to_set, conf_name, namespace=app_name, sessionKey=sessionKey, owner=user)
            legacy_mode = True
        return self.render_json({'has_ignored': True, 'errors': ['legacy_mode=%s' % legacy_mode]})

    if a.id and a.has_ignored:
        return self.render_json({'has_ignored': True, 'errors': []})
    else:
        conf = self.getConf(conf_name, sessionKey=sessionKey, namespace=app_name, owner=user)
        if conf and conf[user] and util.normalizeBoolean(conf[user]['has_ignored']):
            return self.render_json({'has_ignored': True, 'errors': ['using legacy method']})

    if app.is_configured:
        return self.render_json({'is_configured': True, 'errors': []})
    else:
        if self.is_app_admin(app, user):
            return self.render_json({'is_configured': False, 'is_admin': True, 'errors': []})
        return self.render_json({'is_configured': False, 'is_admin': False, 'errors': []})
def scrape_page(self, url, selector, **kwargs):
    """
    Perform a page scrape and return the results (useful for previewing a web_input modular input configuration)
    """
    result = {}

    # Run the input
    try:
        web_input = WebInput(timeout=10)

        # Get the authentication information, if available
        username = None
        password = None
        if 'password' in kwargs and 'username' in kwargs:
            username = kwargs['username']
            password = kwargs['password']

        # Get the user-agent string
        user_agent = None
        if 'user_agent' in kwargs:
            user_agent = kwargs['user_agent']

        # Determine if we should include empty matches
        include_empty_matches = False
        if 'include_empty_matches' in kwargs:
            include_empty_matches = util.normalizeBoolean(kwargs['include_empty_matches'], True)

        # Get the proxy configuration
        conf_stanza = "default"
        try:
            proxy_type, proxy_server, proxy_port, proxy_user, proxy_password = web_input.get_proxy_config(cherrypy.session.get('sessionKey'), conf_stanza)
        except splunk.ResourceNotFound:
            cherrypy.response.status = 202
            return self.render_error_json(_("Proxy server information could not be obtained"))

        # Scrape the page
        result = WebInput.scrape_page(url, selector, username=username, password=password, include_empty_matches=include_empty_matches, proxy_type=proxy_type, proxy_server=proxy_server, proxy_port=proxy_port, proxy_user=proxy_user, proxy_password=proxy_password, user_agent=user_agent)

    except FieldValidationException, e:
        cherrypy.response.status = 202
        return self.render_error_json(_(str(e)))
def getRenderTypes(self):
    """ determine which render types to use for the report """
    if self._isTransformingSearch():
        showViz = normalizeBoolean(self._savedSearchModel.entity.get("display.visualizations.show", True))
        if showViz:
            reportVizType = self._savedSearchModel.entity.get("display.visualizations.type", "charting")
            renderVizType = None
            if reportVizType == "mapping":
                renderVizType = 'map'
            elif reportVizType == "singlevalue":
                renderVizType = 'single'
            else:
                renderVizType = 'chart'
            return [renderVizType, 'table']
        else:
            return ['table']
    else:
        return ['event']
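# Resulting render types for a few report configurations, as implied by the branches above
# (illustrative mapping, not an exhaustive contract):
#
#   display.visualizations.show = 0                            -> ['table']
#   display.visualizations.show = 1, type = charting (default) -> ['chart', 'table']
#   display.visualizations.show = 1, type = mapping             -> ['map', 'table']
#   display.visualizations.show = 1, type = singlevalue         -> ['single', 'table']
#   non-transforming search                                     -> ['event']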
def _initAlertActionsDefaults(self):
    """ use alertActions entity to determine default papersize
        return in form of "<size>" or "<size>-landscape"
    """
    paperSize = DEFAULT_PAPER_SIZE
    paperOrientation = DEFAULT_PAPER_ORIENTATION
    try:
        settings = entity.getEntity(self.ALERT_ACTIONS_ENTITY, 'email', sessionKey=self.sessionKey)
        # paperSize is 'letter', 'legal', 'A4', etc
        paperSize = settings.get('reportPaperSize') or DEFAULT_PAPER_SIZE
        # paperOrientation is 'portrait' or 'landscape'
        paperOrientation = settings.get('reportPaperOrientation') or DEFAULT_PAPER_ORIENTATION
        self._includeSplunkLogo = normalizeBoolean(settings.get('reportIncludeSplunkLogo', self._includeSplunkLogo))
        cidFontListString = settings.get('reportCIDFontList', '') or ''
        self._cidFontList = cidFontListString.split(' ')
    except Exception as e:
        logger.error("Could not access or parse email stanza of alert_actions.conf. Error=%s" % str(e))

    if paperOrientation == 'landscape':
        self._paperSize = paperSize + '-landscape'
    else:
        self._paperSize = paperSize
def plainTableTemplate(results, ssContent):
    if len(results) > 0:
        width_sort_columns = normalizeBoolean(ssContent.get('action.email.width_sort_columns', True))
        columnMaxLens = getSortedColumns(results, width_sort_columns)
        text = ""
        space = " " * 4

        # output column names
        for col, maxlen in columnMaxLens:
            val = col
            padsize = maxlen - len(val)
            text += val + ' ' * padsize + space
        text += "\n" + "-" * len(text) + "\n"

        # output each result's values
        for result in results:
            for col, maxlen in columnMaxLens:
                val = result.get(col, "")
                padsize = maxlen - len(val)
                # left justify ALL the columns
                text += val + ' ' * padsize + space
            text += "\n"
    else:
        text = "No results found."
    return text
def _initParameters(self):
    self._owner = self.request['userName']
    self._initArgs()
    self._initLimitsDefaults()
    self._initAlertActionsDefaults()

    # initialize view type
    if self.ARG_INPUT_DASHBOARD in self.args:
        self._dashboardName = self.args.get(self.ARG_INPUT_DASHBOARD)
        self._viewType = self.VIEW_TYPE_DASHBOARD
    elif self.ARG_INPUT_DASHBOARD_XML in self.args:
        self._dashboardXml = urllib.unquote(self.args.get(self.ARG_INPUT_DASHBOARD_XML))
        self._viewType = self.VIEW_TYPE_DASHBOARD
        logger.debug("pdfgen/render xml=%s" % self._dashboardXml)
    elif self.ARG_INPUT_REPORT in self.args:
        self._reportName = self.args.get(self.ARG_INPUT_REPORT)
        self._viewType = self.VIEW_TYPE_REPORT
    elif self.ARG_INPUT_SEARCH in self.args:
        self._searchStr = self.args.get(self.ARG_INPUT_SEARCH, "No search query specified")
        self._et = self.args.get(self.ARG_INPUT_ET, 0)
        self._lt = self.args.get(self.ARG_INPUT_LT, '')
        # if et or lt is 0.000 change it to 0
        if float(self._et) == 0.0:
            logger.debug("_et was %s, updating it to '0'" % self._et)
            self._et = '0'
        if self._lt and float(self._lt) == 0.0:
            logger.debug("_lt was %s, updating it to '0'" % self._lt)
            self._lt = '0'
        self._reportName = 'Splunk search results'
        self._viewType = self.VIEW_TYPE_SEARCH

    # initialize papersize
    if self.ARG_INPUT_PAPERSIZE in self.args:
        paperSizeArg = self.args.get(self.ARG_INPUT_PAPERSIZE).lower()
        if paperSizeArg in pdfrenderer.PAPERSIZES:
            self._paperSize = paperSizeArg
        else:
            raise ArgError("Paper size " + paperSizeArg + " not valid")
        logger.debug("pdf-init paper-size=%s" % self._paperSize)

    # initialize include-splunk-logo
    self._includeSplunkLogo = normalizeBoolean(self.args.get(self.ARG_INPUT_INCLUDE_SPLUNK_LOGO, self._includeSplunkLogo))
    logger.debug("pdf-init include-splunk-logo=%s" % self._includeSplunkLogo)

    # initialize max-row-per-table
    if self.ARG_INPUT_MAX_ROWS_PER_TABLE in self.args:
        maxRowsPerTableArg = self.args.get(self.ARG_INPUT_MAX_ROWS_PER_TABLE)
        try:
            self._maxRowsPerTable = int(maxRowsPerTableArg)
        except:
            raise ArgError("max-rows-per-table=%s is invalid, must be an integer" % maxRowsPerTableArg)
        logger.debug("pdf-init max-rows-per-table=%s" % (str(self._maxRowsPerTable)))

    # initialize timeout
    if self.ARG_INPUT_TIMEOUT in self.args:
        self._timeoutDuration = int(self.args.get(self.ARG_INPUT_TIMEOUT))
        logger.debug("pdf-init timeoutDuration=%s" % self._timeoutDuration)
    self._startTimeoutClock()

    # initialize time of report
    self._initTimeOfReport()

    # check for SIDs
    if self._viewType is self.VIEW_TYPE_REPORT:
        if self.ARG_INPUT_SID in self.args:
            self._inputSids[0] = self.args.get(self.ARG_INPUT_SID)
    else:
        for argK, argV in self.args.items():
            if self.ARG_INPUT_SID in argK:
                # we want the panel sequence number which is retrieved from "sid_<seqNum>"
                match = self.sidRE.match(argK)
                if match != None and len(match.groups(0)) > 0:
                    seqNum = match.groups(0)[0]
                    if len(seqNum) > 0:
                        self._inputSids[int(seqNum)] = argV
                        logger.debug("sid seqNum=%s value=%s" % (seqNum, argV))

    # get namespace/owner
    self._namespace = self.args.get(self.ARG_INPUT_NAMESPACE)
    if self.ARG_INPUT_OWNER in self.args:
        self._owner = self.args.get(self.ARG_INPUT_OWNER)

    self._validateParameters()

    self._timestampStr = splunk.search.searchUtils.getFormattedTimeForUser(
        sessionKey=self.sessionKey,
        user=self._owner,
        namespace=self._namespace,
        now=self._now)
def scrape_page(self, **kwargs):
    """
    Perform a page scrape and return the results (useful for previewing a web_input modular input configuration)
    """
    result = [{}]

    # Run the input
    try:
        web_input = WebInput(timeout=10)
        kw = {}

        # Get the URL or URI
        url = None
        if 'url' in kwargs:
            url = kwargs['url']
        elif 'uri' in kwargs:
            url = kwargs['uri']

        if url is None:
            cherrypy.response.status = 202
            return self.render_error_json(_("No URL was provided"))

        # Get the selector
        selector = None
        if 'selector' in kwargs:
            selector = kwargs['selector']

        # Determine if we should include empty matches
        if 'empty_matches' in kwargs:
            kw['include_empty_matches'] = util.normalizeBoolean(kwargs['empty_matches'], True)

        # Get the use_element_name parameter
        if 'use_element_name' in kwargs:
            kw['use_element_name'] = util.normalizeBoolean(kwargs['use_element_name'], False)

        # Get the text_separator parameter
        if 'text_separator' in kwargs:
            kw['text_separator'] = kwargs['text_separator']

        # Get the output_as_mv parameter. This parameter is different from the name of the
        # argument that the class accepts and will be renamed accordingly.
        if 'output_as_mv' in kwargs:
            kw['output_matches_as_mv'] = util.normalizeBoolean(kwargs['output_as_mv'], True)

            # If we are outputting as multi-valued parameters, then don't include the separate
            # fields
            if kw['output_matches_as_mv']:
                kw['output_matches_as_separate_fields'] = False
            else:
                # http://lukemurphey.net/issues/1643
                kw['output_matches_as_separate_fields'] = True

        # Get the field match prefix
        if 'match_prefix' in kwargs:
            kw['match_prefix'] = kwargs['match_prefix']

        # Get the browser parameter
        if 'browser' in kwargs:
            kw['browser'] = kwargs['browser']

        # Get the page_limit parameter
        if 'page_limit' in kwargs:
            kw['page_limit'] = int(kwargs['page_limit'])

        # Get the depth_limit parameter
        if 'depth_limit' in kwargs:
            kw['depth_limit'] = int(kwargs['depth_limit'])

        # Get the url_filter parameter
        if 'url_filter' in kwargs:
            kw['url_filter'] = kwargs['url_filter']

        # Get the name_attributes parameter
        if 'name_attributes' in kwargs:
            kw['name_attributes'] = kwargs['name_attributes']

        # Get the raw_content parameter
        if 'raw_content' in kwargs:
            kw['include_raw_content'] = util.normalizeBoolean(kwargs['raw_content'])

        # Only extract links using HTTPS if on Splunk Cloud
        if ModularInput.is_on_cloud(cherrypy.session.get('sessionKey')):
            kw['https_only'] = True
        # Otherwise, allow callers to specify which links to extract
        elif 'https_only' in kwargs:
            kw['https_only'] = util.normalizeBoolean(kwargs['https_only'])

        # Get the proxy configuration
        conf_stanza = "default"

        # Get the timeout parameter
        timeout = 5
        if 'timeout' in kwargs:
            try:
                timeout = int(kwargs['timeout'])
            except:
                # The timeout is invalid. Ignore this for now, it will get picked up when
                # the user attempts to save the input
                pass

        # Make the web scraper instance
        web_scraper = WebScraper(timeout)

        # Get the authentication information, if available
        username = None
        password = None
        if 'password' in kwargs and 'username' in kwargs:
            username = kwargs['username']
            password = kwargs['password']

            username_field = kwargs.get('username_field', None)
            password_field = kwargs.get('password_field', None)
            authentication_url = kwargs.get('authentication_url', None)

            if authentication_url is not None:
                authentication_url = urlparse.urlparse(authentication_url)

            logger.debug("Using credentials for scrape_page")
            web_scraper.set_authentication(username, password, authentication_url, username_field, password_field)

        # Get the user-agent string
        if 'user_agent' in kwargs:
            web_scraper.user_agent = kwargs['user_agent']

        # Set the proxy authentication
        try:
            proxy_type, proxy_server, proxy_port, proxy_user, proxy_password = web_input.get_proxy_config(cherrypy.session.get('sessionKey'), conf_stanza)
            web_scraper.set_proxy(proxy_type, proxy_server, proxy_port, proxy_user, proxy_password)
        except splunk.ResourceNotFound:
            cherrypy.response.status = 202
            return self.render_error_json(_("Proxy server information could not be obtained"))

        # Scrape the page
        result = web_scraper.scrape_page(url, selector, **kw)

    except FieldValidationException as e:
        cherrypy.response.status = 220
        return self.render_error_json(_(str(e)))
    except ServerNotFoundError as e:
        cherrypy.response.status = 220
        return self.render_error_json(_(str(e)))
    except (SelectorError, SelectorSyntaxError, ExpressionError):
        cherrypy.response.status = 220
        return self.render_error_json(_("Selector is invalid. "))
    except LoginFormNotFound:
        cherrypy.response.status = 220
        return self.render_error_json("Login form was not found")
    except FormAuthenticationFailed:
        cherrypy.response.status = 220
        return self.render_error_json("Form authentication failed")
    except Exception as e:
        cherrypy.response.status = 500
        logger.exception("Error generated during execution")
        return self.render_error_json(_(str(e)))

    # Return the information
    if 'include_first_result_only' in kwargs:
        return self.render_json(result[0], set_mime='application/json')
    else:
        return self.render_json(result, set_mime='application/json')
    except Exception, e:
        logger.info(e)

    # layer in arg vals
    if argvals.get('to'):
        ssContent['action.email.to'] = argvals.get('to')
    if argvals.get('bcc'):
        ssContent['action.email.bcc'] = argvals.get('bcc')
    if argvals.get('cc'):
        ssContent['action.email.cc'] = argvals.get('cc')
    if argvals.get('format'):
        ssContent['action.email.format'] = argvals.get('format')
    if argvals.get('from'):
        ssContent['action.email.from'] = argvals.get('from')
    if argvals.get('inline'):
        ssContent['action.email.inline'] = normalizeBoolean(argvals.get('inline'))
    if argvals.get('sendresults'):
        ssContent['action.email.sendresults'] = normalizeBoolean(argvals.get('sendresults'))
    if argvals.get('sendpdf'):
        ssContent['action.email.sendpdf'] = normalizeBoolean(argvals.get('sendpdf'))
    if argvals.get('pdfview'):
        ssContent['action.email.pdfview'] = argvals.get('pdfview')
    if argvals.get('papersize'):
        ssContent['action.email.reportPaperSize'] = argvals.get('papersize')
    if argvals.get('paperorientation'):
        ssContent['action.email.reportPaperOrientation'] = argvals.get('paperorientation')
    if argvals.get('sendcsv'):
        ssContent['action.email.sendcsv'] = normalizeBoolean(argvals.get('sendcsv'))
    if argvals.get('server'):
        ssContent['action.email.mailserver'] = argvals.get('server')
    if argvals.get('subject'):
def sendEmail(results, settings):
    keywords, argvals = splunk.Intersplunk.getKeywordsAndOptions()
    for key in argvals:
        argvals[key] = unquote(argvals[key])

    namespace = settings['namespace']
    owner = settings['owner']
    sessionKey = settings['sessionKey']
    sid = settings['sid']
    ssname = argvals.get('ssname')
    isScheduledView = False

    if ssname:
        # populate content with savedsearch
        if '_ScheduledView__' in ssname or argvals.get('pdfview'):
            if '_ScheduledView__' in ssname:
                ssname = ssname.replace('_ScheduledView__', '')
            else:
                ssname = argvals.get('pdfview')
            uri = entity.buildEndpoint(['scheduled', 'views', ssname], namespace=namespace, owner=owner)
            isScheduledView = True
        else:
            uri = entity.buildEndpoint(['saved', 'searches', ssname], namespace=namespace, owner=owner)

        responseHeaders, responseBody = simpleRequest(uri, method='GET', getargs={'output_mode': 'json'}, sessionKey=sessionKey)
        savedSearch = json.loads(responseBody)
        ssContent = savedSearch['entry'][0]['content']

        # set type of saved search
        if isScheduledView:
            ssContent['type'] = 'view'
        elif savedSearchJSONIsAlert(savedSearch):
            ssContent['type'] = 'alert'
        else:
            ssContent['type'] = 'report'

        # remap needed attributes that are not already on the content
        ssContent['name'] = ssname
        ssContent['app'] = savedSearch['entry'][0]['acl'].get('app')
        ssContent['owner'] = savedSearch['entry'][0]['acl'].get('owner')

        # The footer.text key will always exist for type alert and report.
        # It may not exist for scheduled views created before 6.1 therefore the schedule view default footer.text
        # should be set if the key does not exist.
        # This can be removed once migration has happened to ensure scheduled views always have the footer.text attribute
        ssContent['action.email.footer.text'] = ssContent.get('action.email.footer.text', "If you believe you've received this email in error, please see your Splunk administrator.\r\n\r\nsplunk > the engine for machine data")

        # The message key will always exist for type alert and report.
        # It may not exist for scheduled views created before 6.1 therefore the schedule view default message
        # should be set if the key does not exist.
        # This can be removed once migration has happened to ensure scheduled views always have the message.view attribute
        ssContent['action.email.message'] = ssContent.get('action.email.message.' + ssContent.get('type'), 'A PDF was generated for $name$')

        if normalizeBoolean(ssContent.get('action.email.useNSSubject', False)):
            ssContent['action.email.subject'] = ssContent['action.email.subject.' + ssContent.get('type')]

        # prior to 6.1 the results link was sent as the argval sslink, must check both results_link
        # and sslink for backwards compatibility
        ssContent['results_link'] = argvals.get('results_link', argvals.get('sslink', ''))

        if normalizeBoolean(ssContent['results_link']) and normalizeBoolean(ssContent['type']):
            split_results_path = urllib.splitquery(ssContent.get('results_link'))[0].split('/')
            view_path = '/'.join(split_results_path[:-1]) + '/'
            ssType = ssContent.get('type')
            if ssType == 'alert':
                ssContent['view_link'] = view_path + 'alert?' + urllib.urlencode({'s': savedSearch['entry'][0]['links'].get('alternate')})
            elif ssType == 'report':
                ssContent['view_link'] = view_path + 'report?' + urllib.urlencode({'s': savedSearch['entry'][0]['links'].get('alternate'), 'sid': sid})
            elif ssType == 'view':
                ssContent['view_link'] = view_path + ssContent['name']
            else:
                ssContent['view_link'] = view_path + 'search'
    else:
        # assumes that if no ssname then called from searchbar
        ssContent = {
            'type': 'searchCommand',
            'view_link': '',
            'action.email.sendresults': False,
            'action.email.sendpdf': False,
            'action.email.sendcsv': False,
            'action.email.inline': True,
            'action.email.format': 'table',
            'action.email.subject': 'Splunk Results',
            'action.email.footer.text': "If you believe you've received this email in error, please see your Splunk administrator.\r\n\r\nsplunk > the engine for machine data"
        }

    ssContent['trigger_date'] = None
    ssContent['trigger_timeHMS'] = None
    ssContent['trigger_time'] = argvals.get('trigger_time')

    if normalizeBoolean(ssContent['trigger_time']):
        try:
            triggerSeconds = time.localtime(float(ssContent['trigger_time']))
            ssContent['trigger_date'] = time.strftime("%B %d, %Y", triggerSeconds)
            ssContent['trigger_timeHMS'] = time.strftime("%I:%M:%S", triggerSeconds)
        except Exception, e:
            logger.info(e)
def edit(self, app, action, row, column, **params):
    # saved_searches = SavedSearch.all().filter_by_app(app)
    row = int(row)
    column = int(column)
    dashboard_panel = DashboardPanel.get(params.get('id'), (row, column))

    # panel options layering via GET params with 'options.*' prefix
    option_key = 'options.'
    for param in params:
        if param.startswith(option_key):
            dashboard_panel.add_option(param[len(option_key):], params[param])

    enable_fragment_id = splunk.util.normalizeBoolean(params.get('enable_fragment_id', True))
    view_id = params.get('id').split('/')[-1:][0]
    template_args = dict(app=app,
                         dashboard_panel=dashboard_panel,
                         enable_fragment_id=enable_fragment_id,
                         saved_searches={},
                         is_transforming=util.normalizeBoolean(params.get('is_transforming', True)),
                         view_id=view_id)
    return self.render_template('paneleditor/edit.html', template_args)
def parse_saved_search(cmd, obj, eaiArgsList):
    """Funky saved-search argument parsing."""
    action = []

    # alert
    if eaiArgsList.has_key("alert") and util.normalizeBoolean(eaiArgsList["alert"]):
        eaiArgsList["is_scheduled"] = "1"
        eaiArgsList.pop("alert")
    else:
        eaiArgsList["is_scheduled"] = "0"

    # threshold
    if eaiArgsList.has_key("threshold"):
        alert_type, alert_comparator, alert_threshold = _parseThreshold(eaiArgsList["threshold"])
        eaiArgsList["alert_type"] = alert_type
        eaiArgsList["alert_comparator"] = alert_comparator
        eaiArgsList["alert_threshold"] = alert_threshold
        eaiArgsList.pop("threshold")

    # email
    if eaiArgsList.has_key("email"):
        eaiArgsList["action.email.to"] = eaiArgsList["email"]
        eaiArgsList.pop("email")
        action.append("email")

    # attach
    if eaiArgsList.has_key("attach"):
        eaiArgsList["action.email.sendresults"] = "1"
        eaiArgsList.pop("attach")

    # script
    if eaiArgsList.has_key("script"):
        eaiArgsList["action.script.filename"] = eaiArgsList["script"]
        eaiArgsList.pop("script")
        action.append("script")

    # summary_index
    if eaiArgsList.has_key("summary_index"):
        eaiArgsList["action.summary_index._name"] = eaiArgsList["summary_index"]
        eaiArgsList.pop("summary_index")
        action.append("summary_index")

    # action
    eaiArgsList["actions"] = ",".join(action)

    # start_time
    if eaiArgsList.has_key("start_time"):
        eaiArgsList["dispatch.earliest_time"] = eaiArgsList["start_time"]
        eaiArgsList.pop("start_time")

    # end_time
    if eaiArgsList.has_key("end_time"):
        eaiArgsList["dispatch.latest_time"] = eaiArgsList["end_time"]
        eaiArgsList.pop("end_time")

    # ttl
    if not eaiArgsList.has_key("dispatch.ttl"):
        if eaiArgsList.has_key("ttl"):
            eaiArgsList["dispatch.ttl"] = eaiArgsList["ttl"]
            eaiArgsList.pop("ttl")

    # fields
    if eaiArgsList.has_key("fields"):
        items = eaiArgsList["fields"].split(";")
        for ele in items:
            if len(ele.split(":")) != 2:
                raise ArgError, "Each argument to 'fields' must be in 'key:value' format"
            k, v = ele.split(":")
            eaiArgsList["%s.%s" % ("action.summary_index", k)] = v
        eaiArgsList.pop("fields")
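# Sketch of the rewriting performed above for a hypothetical CLI argument dictionary
# (illustrative keys and values only; the function mutates eaiArgsList in place):
#
#   before: {'alert': '1', 'email': 'a@b.com', 'attach': '1', 'start_time': '-24h'}
#   after:  {'is_scheduled': '1',
#            'action.email.to': 'a@b.com',
#            'action.email.sendresults': '1',
#            'actions': 'email',
#            'dispatch.earliest_time': '-24h'}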
def buildHTMLBody(ssContent, results, settings, email, jobCount):
    messageHTML = re.sub(r'\r\n?|\n', '<br />\r\n', htmlMessageTemplate().render(msg=ssContent.get('action.email.message')))
    resultsHTML = ''
    metaDataHTML = ''
    errorHTML = ''
    if ssContent['type'] == 'view':
        metaDataHTML = htmlMetaDataViewTemplate().render(
            view_link=ssContent.get('view_link')
        )
        errorHTML = htmlErrorTemplate().render(errors=ssContent.get('errorArray'))
    else:
        if ssContent['type'] != 'searchCommand':
            metaDataHTML = htmlMetaDataSSTemplate().render(
                jobcount=jobCount,
                results_link=ssContent.get('results_link'),
                include_results_link=normalizeBoolean(ssContent.get('action.email.include.results_link')),
                view_link=ssContent.get('view_link'),
                include_view_link=normalizeBoolean(ssContent.get('action.email.include.view_link')),
                name=ssContent.get('name'),
                include_search=normalizeBoolean(ssContent.get('action.email.include.search')),
                ssquery=ssContent.get('search'),
                alert_type=ssContent.get('alert_type'),
                include_trigger=normalizeBoolean(ssContent.get('action.email.include.trigger')),
                include_inline=normalizeBoolean(ssContent.get('action.email.inline')),
                include_trigger_time=normalizeBoolean(ssContent.get('action.email.include.trigger_time')),
                trigger_date=ssContent.get('trigger_date'),
                trigger_timeHMS=ssContent.get('trigger_timeHMS'),
                ssType=ssContent.get('type'),
            )
            errorHTML = htmlErrorTemplate().render(errors=ssContent.get('errorArray'))
        # need to check action.email.sendresults for type searchCommand
        if normalizeBoolean(ssContent.get('action.email.inline')) and normalizeBoolean(ssContent.get('action.email.sendresults')):
            resultsHTML = htmlResultsTemplate().render(
                include_results_link=normalizeBoolean(ssContent.get('action.email.include.results_link')),
                results_link=ssContent.get('results_link'),
                truncated=normalizeBoolean(settings.get('truncated')),
                resultscount=len(results),
                jobcount=jobCount,
                hasjob=normalizeBoolean(settings.get('sid'))
            )
            format = ssContent.get('action.email.format')
            if format == 'table':
                resultsHTML += htmlTableTemplate().render(results=results)
            elif format == 'raw':
                resultsHTML += htmlRawTemplate().render(results=results)
            elif format == 'csv':
                resultsHTML += htmlCSVTemplate().render(results=results)
    footerHTML = htmlFooterTemplate().render(footer=ssContent.get('action.email.footer.text'), re=re, filters=filters)
    wrapperHTML = htmlWrapperTemplate().render(body=messageHTML+metaDataHTML+errorHTML+resultsHTML+footerHTML)
    email.attach(MIMEText(wrapperHTML, 'html', _charset=CHARSET))
def buildPlainTextBody(ssContent, results, settings, email, jobCount):
    plainTextMsg = buildPlainTextMessage().render(msg=ssContent.get('action.email.message'))
    plainResults = ''
    plainTextMeta = ''
    plainError = ''
    if ssContent['type'] == 'view':
        plainTextMeta = buildPlainTextViewMetaData().render(
            view_link=ssContent.get('view_link')
        )
        plainError = buildPlainTextError().render(errors=ssContent.get('errorArray'))
    else:
        if ssContent['type'] != 'searchCommand':
            plainTextMeta = buildPlainTextSSMetaData().render(
                jobcount=jobCount,
                results_link=ssContent.get('results_link'),
                include_results_link=normalizeBoolean(ssContent.get('action.email.include.results_link')),
                view_link=ssContent.get('view_link'),
                include_view_link=normalizeBoolean(ssContent.get('action.email.include.view_link')),
                name=ssContent.get('name'),
                include_search=normalizeBoolean(ssContent.get('action.email.include.search')),
                ssquery=ssContent.get('search'),
                alert_type=ssContent.get('alert_type'),
                include_trigger=normalizeBoolean(ssContent.get('action.email.include.trigger')),
                include_inline=normalizeBoolean(ssContent.get('action.email.inline')),
                include_trigger_time=normalizeBoolean(ssContent.get('action.email.include.trigger_time')),
                trigger_date=ssContent.get('trigger_date'),
                trigger_timeHMS=ssContent.get('trigger_timeHMS'),
                ssType=ssContent.get('type')
            )
            plainError = buildPlainTextError().render(errors=ssContent.get('errorArray'))
        # need to check action.email.sendresults for type searchCommand
        if normalizeBoolean(ssContent.get('action.email.inline')) and normalizeBoolean(ssContent.get('action.email.sendresults')):
            plainResults = plainResultsTemplate().render(
                include_results_link=normalizeBoolean(ssContent.get('action.email.include.results_link')),
                results_link=ssContent.get('results_link'),
                truncated=normalizeBoolean(settings.get('truncated')),
                resultscount=len(results),
                jobcount=jobCount,
                hasjob=normalizeBoolean(settings.get('sid'))
            )
            format = ssContent.get('action.email.format')
            if format == 'table':
                plainResults += plainTableTemplate(results, ssContent)
            elif format == 'raw':
                plainResults += plainRawTemplate().render(results=results)
            elif format == 'csv':
                plainResults += plainCSVTemplate(results)
    plainFooter = plainFooterTemplate().render(footer=ssContent.get('action.email.footer.text'))
    email.attach(MIMEText(plainTextMsg + plainTextMeta + plainError + plainResults + plainFooter, 'plain', _charset=CHARSET))
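A minimal sketch of how the two body builders above are typically combined into one multipart/alternative message; the recipient, subject, and CHARSET value are illustrative assumptions, not taken from the surrounding module.

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

CHARSET = 'utf-8'
email_msg = MIMEMultipart('alternative')
email_msg['Subject'] = 'Splunk Results'
email_msg['To'] = 'ops@example.com'

# buildPlainTextBody()/buildHTMLBody() each call email.attach(MIMEText(...));
# mail clients render the last alternative they understand, so the plain part
# is attached first and the HTML part second.
email_msg.attach(MIMEText('plain-text body', 'plain', _charset=CHARSET))
email_msg.attach(MIMEText('<html><body>HTML body</body></html>', 'html', _charset=CHARSET))
print email_msg.as_string()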
def __init__(self, url=None, selector=None, username=None, password=None, timeout=30, name_attributes=None, output_as_mv=True, output_matches_as_mv=None, output_matches_as_separate_fields=False, use_element_name=False, page_limit=1, depth_limit=50, url_filter=None, text_separator=" ", raw_content=False, include_raw_content=None, browser=None, match_prefix=None, user_agent=None, empty_matches=False, empty_value='NULL', authentication_url=None, username_field=None, password_field=None): # Note: output_matches_as_mv and include_raw_content are supported for legacy purposes # Make sure the required arguments are provided if url is None: raise ValueError("url argument must be provided") if selector is None: raise ValueError("selector argument must be provided") # Use the older output_matches_as_mv field if included if output_matches_as_mv is not None: output_as_mv = output_matches_as_mv # Decide on whether to include the matches as separate fields if output_as_mv is set if normalizeBoolean(output_as_mv): output_as_mv = True output_matches_as_separate_fields = False else: output_as_mv = False output_matches_as_separate_fields = True if name_attributes is None: name_attributes = [] # Make the web scraper instance self.web_scraper = WebScraper(int(timeout)) self.web_scraper.user_agent = user_agent # Save the parameters self.params = { "url": url, "selector": selector, "name_attributes": name_attributes, "output_matches_as_mv": normalizeBoolean(output_as_mv), "output_matches_as_separate_fields": normalizeBoolean(output_matches_as_separate_fields), "include_empty_matches": empty_matches, "empty_value": empty_value, "use_element_name" : normalizeBoolean(use_element_name), "page_limit" : int(page_limit), "depth_limit" : int(depth_limit), "url_filter" : url_filter, "include_raw_content" : normalizeBoolean(include_raw_content) if include_raw_content is not None else normalizeBoolean(raw_content), "text_separator" : text_separator, "browser" : browser, "match_prefix" : match_prefix } if username is not None and password is not None: self.web_scraper.set_authentication(username, password, authentication_url, username_field, password_field) SearchCommand.__init__(self, run_in_preview=True, logger_name="web_scrape") self.logger.info("Web scraper running against url=%s", url)
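A standalone sketch of the legacy-argument precedence above, isolated into a hypothetical resolve_output_mode() helper: output_matches_as_mv, when supplied, overrides output_as_mv, and exactly one of the two output modes ends up enabled.

from splunk.util import normalizeBoolean

def resolve_output_mode(output_as_mv=True, output_matches_as_mv=None):
    # the legacy flag wins if the caller provided it
    if output_matches_as_mv is not None:
        output_as_mv = output_matches_as_mv
    if normalizeBoolean(output_as_mv):
        return True, False   # multivalue output, no separate fields
    return False, True       # separate fields instead of multivalue output

print resolve_output_mode()                              # (True, False)
print resolve_output_mode(output_matches_as_mv='false')  # (False, True)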
def getTimeRanges(namespace=None): ''' Returns a list of splunk search time identifiers that are defined in the time.conf files, ordered ascending by the 'order' key in each time stanza. Sample time.conf stanza: [last_7d] label = Last 7 Days earliest_time = -7d latest_time = false order = 30 [previous2_m] label = Last Month earliest_time = -2m@m latest_time = -1m@m order = 40 Sample return structure: [ { 'key' = 'last_7d', 'label' = 'Last 7 Days', 'earliest_time' = '-7d', 'latest_time' = False, 'order' = '30' }, { 'key' = 'previous2_m', 'label' = 'Last Month', 'earliest_time' = '-2m@m', 'latest_time' = '-1m@m', 'order' = '40' } ] ''' stanzas = cached.getEntities('admin/conf-times', namespace=namespace, search='disabled=0', count=-1) # the final flat ordered array we will output. orderedStanzas = [] # two dicts that we use only to check for configuration error cases. # if you use sub_menu = foo in a stanza in times.conf (to place that stanza within the sub_menu with label=foo) # you MUST have a stanza with label=foo, and is_sub_menu=True. # if you dont have this, right we just log an error at runtime. subMenusDefinedByChildren = {} subMenusPresent = {} for s in stanzas: if s == 'default': continue item = { 'key': s, 'label': stanzas[s]['label'], 'order': int(stanzas[s].get('order', 99999)) } # stanzas that are just sub_menu containers. # we only care so we can check for misconfigurations. if ('is_sub_menu' in stanzas[s] and util.normalizeBoolean(stanzas[s]['is_sub_menu'])): # were still outputting a flat list at the end of the day. # the caller will use this flag to build the hierarchy appropriately. item['is_sub_menu'] = True subMenusPresent[stanzas[s]['label']] = True # items that are meant to be INSIDE a submenu. If omitted it the item will go into the main menu. if (stanzas[s].get('sub_menu')) : item['sub_menu'] = stanzas[s]['sub_menu'] subMenusDefinedByChildren[stanzas[s]['sub_menu']] = True # header_label is optional. If omitted it will use the 'header' as the label. if (stanzas[s].get('header_label')) : item['header_label'] = stanzas[s]['header_label'] # only add time bounds if evaluates to something true for p in ('earliest_time', 'latest_time'): # loosening the checking to allow literal '0' values, rather than interpreting them as null. if ( util.normalizeBoolean(stanzas[s].get(p)) or (stanzas[s].get(p) == "0")): item[p] = stanzas[s][p] else: item[p] = False orderedStanzas.append(item) if (subMenusPresent.keys() != subMenusDefinedByChildren.keys()) : logger.error("Configuration error in times.conf. For each sub_menu key (%s) there must be an existing stanza (%s) and vice versa." % (subMenusDefinedByChildren.keys(), subMenusPresent.keys())) # even though the sub_menu = <sub_menu_name> items will be # sorted in alongside the main-level items, and other sub-menu items, # the client code will pull out all sub_menu items and attach them to the # is_sub_menu items. # The thinking is that some clients (ResultsHeader.html) just want a flat list of timeranges and iterating over a tree structure would suck. # other clients that need the tree (TimeRangePicker.html) will have to build it themselves using sub_menu and is_sub_menu # orderedStanzas.sort(cmp=lambda a,b: cmp(a['order'], b['order'])) orderedStanzas.sort(compareTimeRanges) return orderedStanzas
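getTimeRanges() sorts with compareTimeRanges, which is not shown in this section. A minimal sketch of such a comparator, assuming it simply orders stanzas by the integer 'order' key populated above; the real implementation may differ.

def compareTimeRanges(a, b):
    # ascending by 'order'; cmp() is the Python 2 builtin comparator
    return cmp(a['order'], b['order'])

# e.g. the sorted output of getTimeRanges() starts with the lowest 'order' value:
# [{'key': 'last_7d', 'order': 30, ...}, {'key': 'previous2_m', 'order': 40, ...}]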
def __init__(self, settings, logger, action_name='unknown'): """ Initialize ModularAction class. @param settings: A modular action payload in JSON format. @param logger: A logging instance. Recommend using ModularAction.setup_logger. @param action_name: The action name. action_name in payload will take precedence. """ self.settings = json.loads(settings) self.logger = logger self.session_key = self.settings.get('session_key') self.sid = self.settings.get('sid') self.sid_snapshot = '' ## if sid contains rt_scheduler with snapshot-sid; drop snapshot-sid ## sometimes self.sid may be an integer (1465593470.1228) try: rtsid = re.match('^(rt_scheduler.*)\.(\d+)$', self.sid) if rtsid: self.sid = rtsid.group(1) self.sid_snapshot = rtsid.group(2) except: pass ## rid_ntuple is a named tuple that represents ## the three variables that change on a per-result basis self.rid_ntuple = collections.namedtuple('ID', ['orig_sid','rid','orig_rid']) ## rids is a list of rid_ntuple values ## automatically maintained by update() calls self.rids = [] ## current orig_sid based on update() ## aka self.rids[-1].orig_sid self.orig_sid = '' ## current rid based on update() ## aka self.rids[-1].rid self.rid = '' ## current orig_rid based on update() ## aka self.rids[-1].orig_rid self.orig_rid = '' self.results_file = self.settings.get('results_file') ## info self.info = {} if self.results_file: self.info_file = os.path.join(os.path.dirname(self.results_file), 'info.csv') self.search_name = self.settings.get('search_name') self.app = self.settings.get('app') self.user = self.settings.get('user') or self.settings.get('owner') self.configuration = self.settings.get('configuration', {}) ## enforce configuration is a 'dict' if not isinstance(self.configuration, dict): self.configuration = {} ## set loglevel to DEBUG if verbose if normalizeBoolean(self.configuration.get('verbose', 'false')): self.logger.setLevel(logging.DEBUG) self.logger.debug('loglevel set to DEBUG') ## use | sendalert param.action_name=$action_name$ self.action_name = self.configuration.get('action_name') or action_name ## use sid to determine action_mode if isinstance(self.sid, basestring) and 'scheduler' in self.sid: self.action_mode = 'saved' else: self.action_mode = 'adhoc' self.action_status = '' ## Since we don't use the result object we get from settings it will be purged try: del self.settings['result'] except Exception: pass ## events self.events = []
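A standalone illustration of the sid/snapshot split performed in __init__ above; the sid string is an invented example of a real-time scheduler snapshot sid.

import re

sid = 'rt_scheduler__admin__search__RMD512345_at_1465593470_5.12'
rtsid = re.match('^(rt_scheduler.*)\.(\d+)$', sid)
if rtsid:
    sid = rtsid.group(1)           # 'rt_scheduler__admin__search__RMD512345_at_1465593470_5'
    sid_snapshot = rtsid.group(2)  # '12'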
def _renderTable(self, pdfRenderer, view): """ render a table of results """ # get options options = view.getOptions() displayRowNumbers = False if "displayRowNumbers" in options: displayRowNumbers = normalizeBoolean(options['displayRowNumbers']) # build 2-d list of lists data = [] resultsExist = False # get results object # SPL-80004, need to be defensive against a custom post-process that was added when rendering another visualization view.getSearchJobObj().setFetchOptions(search=None) results = view.getSearchJobResults() # determine field set explicitFieldList = view.getSearchParams().get(pu.SP_FIELD_LIST, []) fieldOrder = [] if len(explicitFieldList) > 0: for field in explicitFieldList: if field in results.fieldOrder and field not in fieldOrder: fieldOrder.append(field) if len(fieldOrder) == 0: logger.warning("%s: results.fieldOrder does not contain any explicitly specified fields: %s" % (view.getTitle(), explicitFieldList)) return else: fieldOrder = self._renderTable_restrictFields(results.fieldOrder) if len(fieldOrder) == 0: pdfRenderer.renderText("No results found.") return results = view.getSearchJobResults() for i, result in enumerate(results): if i >= self._maxRowsPerTable: break if (i > 0) and ((i % 100) == 0): self._keepAllSearchesAlive() resultsExist = True # not every result row in the results list will contain a cell for every column in the table # fill in missing cells with the empty string values = [] for field in fieldOrder: if field in result.fields: fieldValues = result.get(field, None) logger.debug("fieldValues=%s len(fieldValues)=%s isinstance(fieldValues, splunk.search.RawEvent)=%s$" % (fieldValues, len(fieldValues), isinstance(fieldValues, splunk.search.RawEvent))) fieldValuesStr = "" if isinstance(fieldValues, splunk.search.RawEvent): fieldValuesStr = fieldValues elif len(fieldValues) > 1: fieldValueStrings = [str(x) for x in fieldValues] if "##__SPARKLINE__##".startswith(fieldValueStrings[0]): fieldValuesStr = ','.join(fieldValueStrings) else: fieldValuesStr = '\n'.join(fieldValueStrings) logger.debug("fieldValueStrings=%s fieldValuesStr=%s" % (fieldValueStrings, fieldValuesStr)) else: fieldValuesStr = fieldValues[0] values.append(fieldValuesStr) else: values.append("") data.append(values) columnWidths = [] if fieldOrder[0] == "_time": columnWidths.append(1.33 * pdfRenderer.ONE_INCH) if resultsExist: pdfRenderer.renderTable(data, headerRow = fieldOrder, columnWidths=columnWidths, displayLineNumbers=displayRowNumbers) else: logger.warning("PDFGenHandler::_renderTable> no results for table") pdfRenderer.renderText("No results found.")
def setup(file, nodeploy): """Set up a new Web Framework Django server instance""" print "\nSetting up the Splunk Application Framework..." try: splunk_home = os.environ.get("SPLUNK_HOME", "") splunk_5 = None splunkd_scheme = None splunkweb_scheme = None splunkd_port = None splunkweb_port = None splunkd_host = None splunkweb_host = None splunkweb_mount = None splunkdj_mount = None splunkdj_appserver_port = None splunkdj_proxy_port = None splunkdj_proxy_path = None while True: version_info = envoy.run([['%s/bin/splunk' % splunk_home, 'version']]) version = version_info.std_out.strip() if not (version.startswith("Splunk 5") or version.startswith("Splunk 6") or version.startswith("Splunk 201")): os.remove(path.join(MAIN_DIR, ".splunkhome")) print "The version must be >= 'Splunk 5.0', found '%s' in '%s'. Run 'splunkdj setup' and try again." % (version, splunk_home) sys.exit(1) if splunk_5 == None: splunk_5 = version.startswith("Splunk 5") # Get Python, Node and Splunk paths splunk_path = path.join(splunk_home, "bin", "splunk" + (".exe" if is_win32 else "")) python_path = path.join(splunk_home, "bin", "python" + (".exe" if is_win32 else "")) node_path = path.join(splunk_home, "bin", "node" + (".exe" if is_win32 else "")) python_exists = path.exists(python_path.strip()) node_exists = path.exists(node_path) # Ensure Python and Node exist if not python_exists: print "No Python interpreter, exiting..." sys.exit(1) if not node_exists: print "No Node.js interpreter, exiting..." sys.exit(1) # Get Various information from Splunk if not splunkd_port: splunkd_port = get_conf_value("web", "settings", "mgmtHostPort").split(":")[1] if not splunkweb_port: splunkweb_port = get_conf_value("web", "settings", "httpport") if not splunkweb_mount: splunkweb_mount = get_conf_value("web", "settings", "root_endpoint") if not splunkd_scheme: is_splunkd_ssl = normalizeBoolean(get_conf_value("server", "sslConfig", "enableSplunkdSSL")) splunkd_scheme = "https" if is_splunkd_ssl else "http" if not splunkweb_scheme: is_splunkweb_ssl = normalizeBoolean(get_conf_value("web", "settings", "enableSplunkWebSSL")) splunkweb_scheme = "https" if is_splunkweb_ssl else "http" splunkd_scheme = splunkd_scheme or "https" splunkd_host = splunkd_host or "localhost" splunkweb_scheme = splunkweb_scheme or "http" splunkweb_host = splunkweb_host or "localhost" splunkweb_mount = splunkweb_mount or "" splunkdj_mount = splunkdj_mount or "dj" splunkdj_appserver_port = splunkdj_appserver_port or SPLUNKDJ_APPSERVER_DEFAULT_PORT splunkdj_proxy_port = splunkdj_proxy_port or SPLUNKDJ_PROXY_DEFAULT_PORT splunkdj_proxy_path = splunkdj_proxy_path or SPLUNKDJ_PROXY_DEFAULT_PATH print "\nThe Splunk Application Framework will use the following values:" print " - Splunkd scheme: %s" % splunkd_scheme print " - Splunkd host: %s" % splunkd_host print " - Splunkd port: %s" % splunkd_port print " - Splunk Web scheme: %s" % splunkweb_scheme print " - Splunk Web host: %s" % splunkweb_host print " - Splunk Web port: %s" % splunkweb_port print " - Splunk Web root endpoint: %s" % splunkweb_mount print " - Web Framework Django appserver port: %s" % splunkdj_appserver_port print " - Web Framework proxy port: %s" % splunkdj_proxy_port print " - Web Framework proxy path: %s" % splunkdj_proxy_path print " - Web Framework mount: %s" % splunkdj_mount print " - Splunk installation (SPLUNK_HOME): %s" % splunk_home print " - Splunk 5: %s" % splunk_5 if confirm("\nAre these values correct ('y' to accept, 'n' to edit)", default=True): break splunkd_scheme = raw_input("Splunkd 
scheme [%s]: " % (splunkd_scheme)) or splunkd_scheme splunkd_host = raw_input("Splunkd host [%s]: " % (splunkd_host)) or splunkd_host splunkd_port = raw_input("Splunkd port [%s]: " % (splunkd_port)) or splunkd_port splunkweb_scheme = raw_input("Splunk Web scheme [%s]: " % (splunkweb_scheme)) or splunkweb_scheme splunkweb_host = raw_input("Splunk Web host [%s]: " % (splunkweb_host)) or splunkweb_host splunkweb_port = raw_input("Splunk Web port [%s]: " % (splunkweb_port)) or splunkweb_port splunkweb_mount = raw_input("Splunk Web mount [%s]: " % (splunkweb_mount)) or splunkweb_mount # Get information about Web Framework ports splunkdj_appserver_port = raw_input("Web Framework Django appserver port [%s]: " % (splunkdj_appserver_port)) or splunkdj_appserver_port while is_port_open("localhost", splunkdj_appserver_port): if confirm("Web Framework Django appserver port '%s' is taken. Would you like to change it" % splunkdj_appserver_port, default=True): splunkdj_appserver_port = raw_input("Web Framework appserver port [%s]: " % (splunkdj_appserver_port)) or splunkdj_appserver_port else: sys.exit(1) splunkdj_proxy_port = raw_input("Web Framework proxy port [%s]: " % (splunkdj_proxy_port)) or splunkdj_proxy_port while is_port_open("localhost", splunkdj_proxy_port): if confirm("Web Framework proxy port '%s' is taken. Would you like to change it" % splunkdj_proxy_port, default=True): splunkdj_proxy_port = raw_input("Web Framework proxy port [%s]: " % (splunkdj_proxy_port)) or splunkdj_proxy_port else: sys.exit(1) splunkdj_proxy_path = raw_input("Web Framework proxy path [%s]: " % splunkdj_proxy_path) or splunkdj_proxy_path splunkdj_mount = raw_input("Web Framework mount [%s]: " % splunkdj_mount) or splunkdj_mount splunk_home = raw_input("Splunk installation (SPLUNK_HOME) [%s]: " % splunk_home) or splunk_home splunk_home = path.expanduser(splunk_home) splunk_5 = normalizeBoolean(raw_input("Splunk 5 [%s]: " % splunk_5) or splunk_5) # Write out SPLUNK_HOME dot_splunkhome = open(path.join(MAIN_DIR, '.splunkhome'), 'w') dot_splunkhome.write(splunk_home) dot_splunkhome.flush() # Serialize configuration create_config_file( config_file_path = path.join(MAIN_DIR, file), splunkd_scheme = splunkd_scheme, splunk_home = splunk_home, splunkd_host = splunkd_host, splunkd_port = int(splunkd_port), splunkweb_scheme = splunkweb_scheme, splunkweb_host = splunkweb_host, splunkweb_port = int(splunkweb_port), splunkweb_mount = splunkweb_mount, x_frame_options_sameorigin = normalizeBoolean(get_conf_value("web", "settings", "x_frame_options_sameorigin")), mount = splunkdj_mount, raw_mount = splunkdj_mount, splunkdj_port = int(splunkdj_appserver_port), proxy_port = int(splunkdj_proxy_port), proxy_path = splunkdj_proxy_path, debug = False, quickstart = False, splunk_5 = splunk_5 ) if not nodeploy: print "\nInstalling default apps..." while True: username = raw_input("Splunk username: "******"Splunk password: "******"", force=True, file=DEFAULT_SPLUNKDJ_CONFIG_FILE, username=username, password=password, ) for app in SPLUNKDJ_DEFAULT_APPS: # Only deploy the default apps if they actually exist app_path = path.join(MAIN_DIR, "server", "apps", app) if not path.exists(app_path): continue args.appname = app deploy(args.appname, args.force, args.file, args.username, args.password) break except KeyboardInterrupt: print sys.exit(0) print "\nThe Splunk Application Framework setup is complete -- enter 'splunkdj run' to start."
def load_page(self, url, **kwargs): """ Proxy a web-page through so that a UI can be displayed for showing potential results. """ web_client = None try: # -------------------------------------- # 1: Make sure that user has permission to make inputs. We don't want to allow people # to use this as a general proxy. # -------------------------------------- if not WebInputController.hasCapability('edit_modinput_web_input'): return self.render_error_html('You need the "edit_modinput_web_input" capability ' + 'to make website inputs') # Don't allow proxying of the javascript files if url.endswith(".js"): cherrypy.response.headers['Content-Type'] = 'application/javascript' return "" # -------------------------------------- # 2: Only allow HTTPS if the install is on Splunk Cloud # -------------------------------------- if ModularInput.is_on_cloud(cherrypy.session.get('sessionKey')) and not url.startswith("https://"): return self.render_error_html('URLs on Splunk Cloud must use HTTPS protocol') # -------------------------------------- # 3: Perform a request for the page # -------------------------------------- # Get the proxy configuration conf_stanza = "default" try: web_input = WebInput(timeout=10) proxy_type, proxy_server, proxy_port, proxy_user, proxy_password = \ web_input.get_proxy_config(cherrypy.session.get('sessionKey'), conf_stanza) except splunk.ResourceNotFound: cherrypy.response.status = 202 return self.render_error_html("Proxy server information could not be obtained") # Get the timeout to use timeout = None if 'timeout' in kwargs: try: timeout = int(kwargs['timeout']) except ValueError: timeout = 15 else: timeout = 15 # Get the user-agent user_agent = kwargs.get('user_agent', None) # Get the information on the browser to use browser = None if 'browser' in kwargs: browser = kwargs['browser'] # Make the client if browser is None or browser == WebScraper.INTEGRATED_CLIENT: web_client = DefaultWebClient(timeout, user_agent, logger) elif browser == WebScraper.FIREFOX: web_client = FirefoxClient(timeout, user_agent, logger) elif browser == WebScraper.CHROME: web_client = ChromeClient(timeout, user_agent, logger) web_client.setProxy(proxy_type, proxy_server, proxy_port, proxy_user, proxy_password) # Get the username and password username = kwargs.get('username', None) password = kwargs.get('password', None) username_field = kwargs.get('username_field', None) password_field = kwargs.get('password_field', None) authentication_url = kwargs.get('authentication_url', None) if username is not None and password is not None: username = kwargs['username'] password = kwargs['password'] username_field = kwargs.get('username_field', None) password_field = kwargs.get('password_field', None) authentication_url = kwargs.get('authentication_url', None) web_client.setCredentials(username, password) if authentication_url is not None: logger.debug("Authenticating using form login in scrape_page") web_client.doFormLogin(authentication_url, username_field, password_field) # Get the page try: content = web_client.get_url(url, 'GET') response = web_client.get_response_headers() except: logger.exception("Exception generated while attempting to content for url=%s", url) cherrypy.response.status = 500 return self.render_error_html("Page preview could not be created using a web-browser") # -------------------------------------- # 4: Render the content with the browser if necessary # -------------------------------------- """ if 'text/html' in response['content-type']: # Get the information on the browser to use 
browser = None if 'browser' in kwargs: browser = kwargs['browser'] # Try rendering the content using a web-browser try: if browser is not None and browser != WebScraper.INTEGRATED_CLIENT: web_scraper = WebScraper(timeout=timeout) web_scraper.set_proxy(proxy_type, proxy_server, proxy_port, proxy_user, proxy_password) web_scraper.set_authentication(username, password) content = web_scraper.get_result_browser(urlparse.urlparse(url), browser) except: logger.exception("Exception generated while attempting to get browser rendering or url=%s", url) cherrypy.response.status = 500 return self.render_error_html("Page preview could not be created using a web-browser") """ # -------------------------------------- # 5: Rewrite the links in HTML files so that they also point to the internal proxy # -------------------------------------- if "<html" in content: # Parse the content html = lxml.html.document_fromstring(content) # Rewrite the links to point to this internal proxy rewrite_using_internal_proxy = True if rewrite_using_internal_proxy: def relocate_href(link): """ Change the hrefs such that they go through the proxy. """ link = urlparse.urljoin(url, link) if link.endswith(".js"): return "" if not link.endswith(".css"): return "load_page?url=" + link else: return link html.rewrite_links(relocate_href) # Block the href links for element, attribute, _, _ in html.iterlinks(): if element.tag == "a" and attribute == "href": element.set('href', "#") elif element.tag == "form" and attribute == "action": element.set('action', "?") else: html.make_links_absolute(url) # Determine if we should clean the JS clean_script = True if 'clean_script' in kwargs: clean_script = util.normalizeBoolean(kwargs['clean_script']) # Determine if we should clean the CSS clean_styles = False if 'clean_styles' in kwargs: clean_styles = util.normalizeBoolean(kwargs['clean_styles']) # Clean up the HTML if clean_styles or clean_script: kill_tags = [] if clean_script: kill_tags = ["script"] # Remove the script blocks cleaner = Cleaner(page_structure=False, kill_tags=kill_tags, javascript=False, links=False, style=clean_styles, safe_attrs_only=False) # Get the content content = lxml.html.tostring(cleaner.clean_html(html)) else: content = lxml.html.tostring(html) # -------------------------------------- # 6: Respond with the results # -------------------------------------- if 'content-type' in response: cherrypy.response.headers['Content-Type'] = response['content-type'] else: cherrypy.response.headers['Content-Type'] = 'text/html' # -------------------------------------- # 7: Clear Javascript files # -------------------------------------- if response.get('content-type', "") == "application/javascript" \ or response.get('content-type', "") == "application/x-javascript" \ or response.get('content-type', "") == "text/javascript" \ or url.endswith(".js"): return "" return content except LoginFormNotFound: logger.debug("Login form not found") return self.render_error_html("Login form was not found") except FormAuthenticationFailed as e: logger.debug("Form authentication failed: " + str(e)) return self.render_error_html("Form authentication failed: " + str(e)) except: logger.exception("Error when attempting to proxy an HTTP request") cherrypy.response.status = 500 return self.render_error_html("Page preview could not be created") finally: if web_client: web_client.close()
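A self-contained sketch of the link-rewriting idea from step 5 of load_page(): parse the fetched HTML with lxml and route each link back through the proxy endpoint. The sample HTML and base URL are invented, and the anchor/form blanking done above is omitted for brevity.

import urlparse
import lxml.html

base_url = 'http://example.com/index.html'
content = '<html><body><a href="/next/page.html">next</a></body></html>'

def relocate_href(link):
    # make the link absolute, then point it at the proxy endpoint
    link = urlparse.urljoin(base_url, link)
    return "load_page?url=" + link

html = lxml.html.document_fromstring(content)
html.rewrite_links(relocate_href)
print lxml.html.tostring(html)
# ... <a href="load_page?url=http://example.com/next/page.html">next</a> ...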
def getJobCount(jobContent): if jobContent.get('statusBuckets') == 0 or (normalizeBoolean(jobContent.get('reportSearch')) and not re.match('sendemail', jobContent.get('reportSearch'))): return jobContent.get('resultCount') else: return jobContent.get('eventCount')
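An illustration of getJobCount()'s choice between resultCount and eventCount, using invented job-content dicts shaped like the Splunk search job endpoint output.

transforming_job = {'statusBuckets': 0, 'reportSearch': 'stats count by host',
                    'resultCount': 12, 'eventCount': 30000}
event_job = {'statusBuckets': 300, 'reportSearch': None,
             'resultCount': 30000, 'eventCount': 30000}

print getJobCount(transforming_job)  # 12     (transforming search: use resultCount)
print getJobCount(event_job)         # 30000  (event search: use eventCount)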
def run(blocking=True): # get confs global_cfg = splunk_to_cherry_cfg('web', 'settings') # allow command line arguments to override the configuration # eg. --httpport=80 args = util.args_to_dict() # debugging can be turned on from the command line with --debug if args.get('debug'): del args['debug'] logger.setLevel(logging.DEBUG) for lname, litem in logger.manager.loggerDict.items(): if not isinstance(litem, logging.PlaceHolder): logger.debug("Updating logger=%s to level=DEBUG" % lname) litem.setLevel(logging.DEBUG) args['js_logger_mode'] = 'Server' args['js_no_cache'] = True global_cfg.update(args) # support SPLUNK_BINDIP backwards compatibly. -- overrides web.conf if os.environ.has_key('SPLUNK_BINDIP'): global_cfg['server.socket_host'] = os.environ['SPLUNK_BINDIP'].strip() global_cfg['server.socket_port'] = global_cfg['httpport'] if normalizeBoolean(global_cfg.get('enableSplunkWebSSL', False)): logger.info('Enabling SSL') priv_key_path = str(global_cfg['privKeyPath']) ssl_certificate = str(global_cfg['caCertPath']) ssl_ciphers = str(global_cfg['cipherSuite']) if os.path.isabs(priv_key_path): global_cfg['server.ssl_private_key'] = priv_key_path else: global_cfg['server.ssl_private_key'] = make_splunkhome_path([priv_key_path]) if os.path.isabs(ssl_certificate): global_cfg['server.ssl_certificate'] = ssl_certificate else: global_cfg['server.ssl_certificate'] = make_splunkhome_path([ssl_certificate]) if not os.path.exists(global_cfg['server.ssl_private_key']): raise ValueError("%s Not Found" % global_cfg['server.ssl_private_key']) if not os.path.exists(global_cfg['server.ssl_certificate']): raise ValueError("%s Not Found" % global_cfg['server.ssl_certificate']) if global_cfg.get('supportSSLV3Only'): global_cfg['server.ssl_v3_only'] = True if ssl_ciphers: global_cfg['server.ssl_ciphers'] = ssl_ciphers else: # make sure the secure flag is not set on session cookies if we're not serving over SSL global_cfg['tools.sessions.secure'] = False # setup cherrypy logging infrastructure if global_cfg.has_key('log.access_file'): filename = make_absolute(global_cfg['log.access_file'], BASE_LOG_PATH) maxsize = int(global_cfg.get('log.access_maxsize', 0)) maxcount = int(global_cfg.get('log.access_maxfiles', 5)) if maxsize > 0: cherrypy.log.access_file = '' h = logging.handlers.RotatingFileHandler(filename, 'a', maxsize, maxcount) h.setLevel(logging.INFO) h.setFormatter(_cplogging.logfmt) cherrypy.log.access_log.addHandler(h) del global_cfg['log.access_file'] else: global_cfg['log.access_file'] = filename if global_cfg.has_key('log.error_file'): # we've already committed to web_service.log by this point del global_cfg['log.error_file'] cherrypy.log.error_file = '' cherrypy.log.error_log.addHandler(splunk_log_handler) if global_cfg.has_key('log.error_maxsize'): splunk_log_handler.maxBytes = int(global_cfg['log.error_maxsize']) splunk_log_handler.backupCount = int(global_cfg.get('log.error_maxfiles', 5)) # now that we have somewhere to log, test the ssl keys. - SPL-34126 # Lousy solution, but python's ssl itself hangs with encrypted keys, so avoid hang by # bailing with a message if global_cfg['enableSplunkWebSSL']: for cert_file in (global_cfg['server.ssl_private_key'], global_cfg['server.ssl_certificate']): if is_encrypted_cert(cert_file): logger.error("""Specified cert '%s' is encrypted with a passphrase. SplunkWeb does not support passphrase-encrypted keys at this time. 
To resolve the problem, decrypt the keys on disk, generate new passphrase-less keys, or disable ssl for SplunkWeb.""" % cert_file) raise Exception("Unsupported encrypted cert file.") # set login settings if global_cfg.get('tools.sessions.storage_type') == 'file': global_cfg['tools.sessions.storage_path'] = make_absolute(global_cfg['tools.sessions.storage_path']) # SPL-16963: add port number to session key to allow for sessions for multiple # instances to run on a single host, without mutually logging each other out. global_cfg['tools.sessions.name'] = "session_id_%s" % global_cfg['httpport'] # set mako template cache directory global_cfg.setdefault('mako_cache_path', MAKO_CACHE_PATH) root_name = global_cfg.get('root_endpoint', FAILSAFE_ROOT_ENDPOINT).strip('/') ctrl = TopController() cfg = {'global' : global_cfg} # initialize all of the custom endpoints that are registered in the # apps ctrl.custom.load_handlers() # Serve static files if so configured if global_cfg.has_key('static_endpoint'): mount_static(ctrl, global_cfg, cfg) if global_cfg.has_key('testing_endpoint'): if (global_cfg.get('static_dir','') == '') : logger.warn('testing endpoint configured, but no testing directory. Falling back to ' + FAILSAFE_TESTING_DIR) staticdir = make_absolute(global_cfg.get('testing_dir', FAILSAFE_TESTING_DIR), '') cfg[global_cfg['testing_endpoint']] = { 'tools.staticdir.on' : True, 'tools.staticdir.dir' : staticdir, 'tools.staticdir.strip_version' : True } if global_cfg.has_key('rss_endpoint'): logger.debug('Checking for shared storage location') rssdir = get_rss_parent_dir() if len(rssdir) > 0: logger.debug('Using shared storage location: %s' % rssdir) else: rssdir = make_absolute(global_cfg.get('rss_dir', FAILSAFE_RSS_DIR), '') logger.debug('No shared storage location configured, using: %s' % rssdir) cfg[global_cfg['rss_endpoint']] = { 'tools.staticdir.on' : True, 'tools.staticdir.dir' : rssdir, 'tools.staticdir.strip_version' : False, 'tools.staticdir.default_ext' : 'xml', 'error_page.404': make_splunkhome_path([FAILSAFE_STATIC_DIR, 'html', 'rss_404.html']) } # Modules served statically out of /modules or out of an app's modules dir def module_resolver(section, branch, dir): from lib.apps import local_apps # first part of branch is the module name parts = os.path.normpath(branch.strip('/')).replace(os.path.sep, '/').split('/') locale = i18n.current_lang(True) if not parts: return False module_path = local_apps.getModulePath(parts[0]) if module_path: fn = os.path.join(module_path, *parts[1:]) if fn.endswith('.js') and os.path.exists(fn): return i18n.translate_js(fn) # returns the path to a cached file containing the original js + json translation map return fn elif parts[0].startswith('modules-') and parts[0].endswith('.js'): hash = parts[0].replace('modules-', '').replace('.min.js', '') return make_absolute(os.path.join(i18n.CACHE_PATH, '%s-%s-%s.cache' % ('modules.min.js', hash, locale))) elif parts[0].startswith('modules-') and parts[0].endswith('.css'): return filechain.MODULE_STATIC_CACHE_PATH + os.sep + 'css' + os.sep + parts[0] return False moddir = make_absolute(global_cfg.get('module_dir', FAILSAFE_MODULE_PATH)) cfg['/modules'] = { 'tools.staticdir.strip_version' : True, 'tools.staticdir.on' : True, 'tools.staticdir.match' : re.compile(r'.*\.(?!html$|spec$|py$)'), # only files with extensions other than .html, .py and .spec are served 'tools.staticdir.dir' : moddir, 'tools.staticdir.resolver' : module_resolver, 'tools.staticdir.content_types' : {'js' : 'application/javascript'} # correct 
python's application/x-javascript } cfg['/'] = { 'request.dispatch': i18n.I18NDispatcher(), } # enable gzip + i18n goodness if global_cfg.get('enable_gzip', False): cfg['/'].update({ 'tools.gzip.on' : True, 'tools.gzip.mime_types' : ['text/plain', 'text/html', 'text/css', 'application/javascript', 'application/x-javascript'], }) #cfg['/']['tools.gzip.on'] = False # Set maximum filesize we can receive (in MB) maxsize = global_cfg.get('max_upload_size', DEFAULT_MAX_UPLOAD_SIZE) cfg['global']['server.max_request_body_size'] = int(maxsize) * 1024 * 1024 if global_cfg.get('enable_throttle', False): from lib import throttle cfg['global'].update({ 'tools.throttle.on' : True, 'tools.throttle.bandwidth': int(global_cfg.get('throttle_bandwidth', 50)), 'tools.throttle.latency': int(global_cfg.get('throttle_latency', 100)) }) if global_cfg.get('enable_log_runtime', False): points = global_cfg.get('enable_log_runtime') if points == 'All': points = 'on_start_resource,before_request_body,before_handler,before_finalize,on_end_resource,on_end_request' if points is True: points = 'on_end_resource' for point in points.split(','): def log_closure(point): def log(): import time starttime = cherrypy.response.time endtime = time.time() delta = (endtime - starttime) * 1000 logger.warn('log_runtime point=%s path="%s" start=%f end=%f delta_ms=%.1f' % (point, cherrypy.request.path_info, starttime, endtime, delta)) return log setattr(cherrypy.tools, 'log_'+point, cherrypy.Tool(point, log_closure(point))) cfg['/']['tools.log_%s.on' % point] = True if global_cfg.get('storm_enabled'): from splunk.appserver.mrsparkle.lib.storm import hook_storm_session hook_storm_session() # setup handler to create and remove the pidfile pid_path = make_absolute(global_cfg.get('pid_path', PID_PATH)) ProcessID(cherrypy.engine, pid_path).subscribe() # # process splunkd status information # startup.initVersionInfo() # set start time for restart checking cfg['global']['start_time'] = time.time() # setup global error handling page cfg['global']['error_page.default'] = error.handleError # # TODO: refactor me into locale stuff # cfg['global']['DISPATCH_TIME_FORMAT'] = '%s.%Q' # END # Common splunk paths cfg['global']['etc_path'] = make_absolute(SPLUNK_ETC_PATH) cfg['global']['site_packages_path'] = make_absolute(SPLUNK_SITE_PACKAGES_PATH) cfg['global']['mrsparkle_path'] = make_absolute(SPLUNK_MRSPARKLE_PATH) listen_on_ipv6 = global_cfg.get('listenOnIPv6') socket_host = global_cfg.get('server.socket_host') if not socket_host: if listen_on_ipv6: socket_host = global_cfg['server.socket_host'] = '::' else: socket_host = global_cfg['server.socket_host'] = '0.0.0.0' logger.info("server.socket_host defaulting to %s" % socket_host) if ':' in socket_host: if not listen_on_ipv6: logger.warn('server.socket_host was set to IPv6 address "%s", so ignoring listenOnIPv6 value of "%s"' % (socket_host, listen_on_ipv6)) else: if listen_on_ipv6: logger.warn('server.socket_host was to to IPv4 address "%s", so ignoring listenOnIPv6 values of "%s"' % (socket_host, listen_on_ipv6)) if socket_host == '::': # Start a second server to listen to the IPV6 socket if isinstance(listen_on_ipv6, bool) or listen_on_ipv6.lower() != 'only': global_cfg['server.socket_host'] = '0.0.0.0' from cherrypy import _cpserver from cherrypy import _cpwsgi_server server2 = _cpserver.Server() server2.httpserver = _cpwsgi_server.CPWSGIServer() server2.httpserver.bind_addr = ('::', global_cfg['server.socket_port']) server2.socket_host = '::' server2.socket_port = global_cfg['server.socket_port'] 
for key in ('ssl_private_key', 'ssl_certificate', 'ssl_v3_only', 'ssl_ciphers'): if 'server.'+key in global_cfg: setattr(server2, key, global_cfg['server.'+key]) setattr(server2.httpserver, key, global_cfg['server.'+key]) server2.subscribe() if root_name: # redirect / to the root endpoint cherrypy.tree.mount(RootController(), '/', cfg) cherrypy.config.update(cfg) if global_cfg.get('enable_profile', False): from cherrypy.lib import profiler cherrypy.tree.graft( profiler.make_app(cherrypy.Application(ctrl, '/' + root_name, cfg), path=global_cfg.get('profile_path', '/tmp/profile')), '/' + root_name ) else: cherrypy.tree.mount(ctrl, '/' + root_name, cfg) cherrypy.engine.signal_handler.subscribe() # this makes Ctrl-C work when running in nodaemon if splunk.clilib.cli_common.isWindows: from cherrypy.process import win32 cherrypy.console_control_handler = win32.ConsoleCtrlHandler(cherrypy.engine) cherrypy.engine.console_control_handler.subscribe() # log active config for k in sorted(cherrypy.config): logger.info('CONFIG: %s (%s): %s' % (k, type(cherrypy.config[k]).__name__, cherrypy.config[k])) cherrypy.engine.start() # clean up caches on init filechain.clear_cache() i18n.init_js_cache() if blocking: # this routine that starts this as a windows service will not want us to block here. cherrypy.engine.block()
return output # # main # # merge any passed args args = DEFAULT_ARGS for item in sys.argv: kv = item.split('=') if len(kv) > 1: val = item[item.find('=') + 1:] try: val = int(val) except: pass args[kv[0]] = util.normalizeBoolean(val) # run generator try: for c in range(args['chunks']): if (c > 0 and args['chunkdelay'] > 0): time.sleep(args['chunkdelay']) results = generateData(c, **args) isp.outputStreamResults(results) except: sys.stdout.write("FAILED: \n") traceback.print_exc()
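A standalone illustration of the key=value argument handling above; DEFAULT_ARGS and the argv values here are invented. Numeric values are cast to int where possible, then normalizeBoolean maps true/false-style strings to booleans and leaves everything else unchanged.

import splunk.util as util

DEFAULT_ARGS = {'chunks': 1, 'chunkdelay': 0, 'outputmode': 'stream'}
argv = ['generator.py', 'chunks=5', 'chunkdelay=0.5', 'debug=true']

args = dict(DEFAULT_ARGS)
for item in argv:
    kv = item.split('=')
    if len(kv) > 1:
        val = item[item.find('=') + 1:]
        try:
            val = int(val)
        except:
            pass
        args[kv[0]] = util.normalizeBoolean(val)
# args is roughly {'chunks': 5, 'chunkdelay': '0.5', 'outputmode': 'stream', 'debug': True}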