def sync_apps(profile=False, validate=False, conquer=False):
    """Push the current cherrypy application map out to every HTTP server.

    Each app mounted in ``cherrypy.tree`` may be wrapped in middleware first:

    profile -- wrap with the cherrypy profiler (per-app, aggregate=False).
    validate -- wrap with wsgiref's WSGI-compliance validator.
    conquer -- wrap with wsgiconq's WSGILogger tracing middleware.
    """
    apps = []
    for base, app in cherrypy.tree.apps.iteritems():
        if base == "/":
            # the http servers expect the root mount point as an empty string
            base = ""
        if profile:
            app = profiler.make_app(app, aggregate=False)
        if conquer:
            try:
                import wsgiconq
            except ImportError:
                warnings.warn(
                    "Error importing wsgiconq. pyconquer will not run.")
            else:
                app = wsgiconq.WSGILogger(app)
        if validate:
            try:
                # aliased so the import does not clobber the `validate` flag
                from wsgiref import validate as wsgiref_validate
            except ImportError:
                warnings.warn(
                    "Error importing wsgiref. The validator will not run.")
            else:
                app = wsgiref_validate.validator(app)
        apps.append((base, app))
    # reverse-lexicographic order puts longer (more specific) mount points
    # first -- presumably the servers match prefixes in order; TODO confirm
    apps.sort()
    apps.reverse()
    for s in cherrypy.server.httpservers:
        s.mount_points = apps
def sync_apps(profile=False, validate=False, conquer=False):
    """Rebuild the (optionally middleware-wrapped) app list for all servers.

    profile -- wrap each app with the cherrypy profiler (aggregate=False).
    validate -- wrap each app with wsgiref's WSGI-compliance validator.
    conquer -- wrap each app with wsgiconq's WSGILogger.
    """
    apps = []
    for base, app in cherrypy.tree.apps.iteritems():
        if base == "/":
            # root mount point is represented as an empty string
            base = ""
        if profile:
            app = profiler.make_app(app, aggregate=False)
        if conquer:
            try:
                import wsgiconq
            except ImportError:
                warnings.warn("Error importing wsgiconq. pyconquer will not run.")
            else:
                app = wsgiconq.WSGILogger(app)
        if validate:
            try:
                # aliased so the import does not clobber the `validate` flag
                from wsgiref import validate as wsgiref_validate
            except ImportError:
                warnings.warn("Error importing wsgiref. The validator will not run.")
            else:
                app = wsgiref_validate.validator(app)
        apps.append((base, app))
    # reverse-sorted so longer (more specific) mount points come first
    apps.sort()
    apps.reverse()
    for s in cherrypy.server.httpservers:
        s.mount_points = apps
def sync_apps(profile=False, validate=False, conquer=False):
    """Hook the (optionally decorated) cherrypy tree into the HTTP server.

    profile -- wrap with the cherrypy profiler (aggregate=False).
    conquer -- wrap with wsgiconq's WSGILogger.
    validate -- wrap with wsgiref's WSGI-compliance validator.
    """
    app = cherrypy.tree
    if profile:
        app = profiler.make_app(app, aggregate=False)
    if conquer:
        try:
            import wsgiconq
        except ImportError:
            warnings.warn("Error importing wsgiconq. pyconquer will not run.")
        else:
            app = wsgiconq.WSGILogger(app)
    if validate:
        try:
            # aliased so the import does not clobber the `validate` flag
            from wsgiref import validate as wsgiref_validate
        except ImportError:
            warnings.warn(
                "Error importing wsgiref. The validator will not run.")
        else:
            app = wsgiref_validate.validator(app)
    # Attach to whichever server flavor is running.
    h = cherrypy.server.httpserver
    if hasattr(h, 'wsgi_app'):
        # CherryPy's wsgiserver
        h.wsgi_app = app
    elif hasattr(h, 'fcgiserver'):
        # flup's WSGIServer
        h.fcgiserver.application = app
    elif hasattr(h, 'scgiserver'):
        # flup's WSGIServer
        h.scgiserver.application = app
def sync_apps(profile=False, validate=False, conquer=False):
    """Install the (optionally decorated) cherrypy tree as the server's app.

    profile -- wrap with the cherrypy profiler (aggregate=False).
    conquer -- wrap with wsgiconq's WSGILogger.
    validate -- wrap with wsgiref's WSGI-compliance validator.
    """
    app = cherrypy.tree
    if profile:
        app = profiler.make_app(app, aggregate=False)
    if conquer:
        try:
            import wsgiconq
        except ImportError:
            warnings.warn("Error importing wsgiconq. pyconquer will not run.")
        else:
            app = wsgiconq.WSGILogger(app)
    if validate:
        try:
            # aliased so the import does not clobber the `validate` flag
            from wsgiref import validate as wsgiref_validate
        except ImportError:
            warnings.warn("Error importing wsgiref. The validator will not run.")
        else:
            app = wsgiref_validate.validator(app)
    # Attach to whichever server flavor is running.
    h = cherrypy.server.httpserver
    if hasattr(h, 'wsgi_app'):
        # CherryPy's wsgiserver
        h.wsgi_app = app
    elif hasattr(h, 'fcgiserver'):
        # flup's WSGIServer
        h.fcgiserver.application = app
    elif hasattr(h, 'scgiserver'):
        # flup's WSGIServer
        h.scgiserver.application = app
def sync_apps(profile=False):
    """Push the cherrypy application map out to every HTTP server.

    profile -- if true, wrap each app with the cherrypy profiler
               (aggregate=False) before mounting.
    """
    apps = []
    for base, app in cherrypy.tree.apps.iteritems():
        if base == "/":
            # the http servers expect the root mount point as an empty string
            base = ""
        if profile:
            apps.append((base, profiler.make_app(app, aggregate=False)))
        else:
            apps.append((base, app))
    # reverse-sorted so longer (more specific) mount points come first
    apps.sort()
    apps.reverse()
    for s in cherrypy.server.httpservers:
        s.mount_points = apps
def sync_apps(profile=False, validate=False, conquer=False):
    """Install the (optionally decorated) cherrypy tree as the WSGI app.

    profile -- wrap with the cherrypy profiler (aggregate=False).
    conquer -- wrap with wsgiconq's WSGILogger.
    validate -- wrap with wsgiref's WSGI-compliance validator.
    """
    app = cherrypy.tree
    if profile:
        app = profiler.make_app(app, aggregate=False)
    if conquer:
        try:
            import wsgiconq
        except ImportError:
            warnings.warn("Error importing wsgiconq. pyconquer will not run.")
        else:
            app = wsgiconq.WSGILogger(app)
    if validate:
        try:
            # aliased so the import does not clobber the `validate` flag
            from wsgiref import validate as wsgiref_validate
        except ImportError:
            warnings.warn("Error importing wsgiref. The validator will not run.")
        else:
            app = wsgiref_validate.validator(app)
    cherrypy.server.httpserver.wsgi_app = app
def get_app(self):
    """Obtain a new (decorated) WSGI app to hook into the origin server.

    Starts from ``cherrypy.tree`` and, depending on the instance flags,
    layers on middleware:

    self.profile -- cherrypy profiler (aggregate=False).
    self.conquer -- wsgiconq's WSGILogger with C-call tracing (c_calls=True).
    self.validate -- wsgiref's WSGI-compliance validator.

    Returns the fully wrapped WSGI application.
    """
    import cherrypy
    app = cherrypy.tree
    if self.profile:
        app = profiler.make_app(app, aggregate=False)
    if self.conquer:
        try:
            import wsgiconq
        except ImportError:
            warnings.warn("Error importing wsgiconq. pyconquer will not run.")
        else:
            app = wsgiconq.WSGILogger(app, c_calls=True)
    if self.validate:
        try:
            # aliased so the import does not clobber the `validate` attribute name
            from wsgiref import validate as wsgiref_validate
        except ImportError:
            warnings.warn("Error importing wsgiref. The validator will not run.")
        else:
            app = wsgiref_validate.validator(app)
    return app
server2.socket_port = global_cfg['server.socket_port'] for key in ('ssl_private_key', 'ssl_certificate', 'ssl_v3_only', 'ssl_ciphers'): if 'server.'+key in global_cfg: setattr(server2, key, global_cfg['server.'+key]) setattr(server2.httpserver, key, global_cfg['server.'+key]) server2.subscribe() if root_name: # redirect / to the root endpoint cherrypy.tree.mount(RootController(), '/', cfg) cherrypy.config.update(cfg) if global_cfg.get('enable_profile', False): from cherrypy.lib import profiler cherrypy.tree.graft( profiler.make_app(cherrypy.Application(ctrl, '/' + root_name, cfg), path=global_cfg.get('profile_path', '/tmp/profile')), '/' + root_name ) else: cherrypy.tree.mount(ctrl, '/' + root_name, cfg) cherrypy.engine.signal_handler.subscribe() # this makes Ctrl-C work when running in nodaemon if splunk.clilib.cli_common.isWindows: from cherrypy.process import win32 cherrypy.console_control_handler = win32.ConsoleCtrlHandler(cherrypy.engine) cherrypy.engine.console_control_handler.subscribe() # log active config for k in sorted(cherrypy.config): logger.info('CONFIG: %s (%s): %s' % (k, type(cherrypy.config[k]).__name__, cherrypy.config[k]))
def run(blocking=True):
    """Configure and start the splunkweb cherrypy server.

    Reads web.conf settings (overridable from the command line), configures
    SSL, logging, static/module/rss endpoints, mounts the controllers, and
    starts the cherrypy engine.

    blocking -- when true, block in cherrypy.engine.block(); the Windows
                service entry point passes False so it can return.
    """
    # get confs
    global_cfg = splunk_to_cherry_cfg('web', 'settings')

    # allow command line arguments to override the configuration
    # eg. --httpport=80
    args = util.args_to_dict()

    # debugging can be turned on from the command line with --debug
    if args.get('debug'):
        del args['debug']
        logger.setLevel(logging.DEBUG)
        for lname, litem in logger.manager.loggerDict.items():
            if not isinstance(litem, logging.PlaceHolder):
                logger.debug("Updating logger=%s to level=DEBUG" % lname)
                litem.setLevel(logging.DEBUG)
        args['js_logger_mode'] = 'Server'
        args['js_no_cache'] = True
    global_cfg.update(args)

    # support SPLUNK_BINDIP backwards compatibly. -- overrides web.conf
    if os.environ.has_key('SPLUNK_BINDIP'):
        global_cfg['server.socket_host'] = os.environ['SPLUNK_BINDIP'].strip()

    global_cfg['server.socket_port'] = global_cfg['httpport']

    if normalizeBoolean(global_cfg.get('enableSplunkWebSSL', False)):
        logger.info('Enabling SSL')
        priv_key_path = str(global_cfg['privKeyPath'])
        ssl_certificate = str(global_cfg['caCertPath'])
        ssl_ciphers = str(global_cfg['cipherSuite'])

        # relative paths are resolved against SPLUNK_HOME
        if os.path.isabs(priv_key_path):
            global_cfg['server.ssl_private_key'] = priv_key_path
        else:
            global_cfg['server.ssl_private_key'] = make_splunkhome_path([priv_key_path])

        if os.path.isabs(ssl_certificate):
            global_cfg['server.ssl_certificate'] = ssl_certificate
        else:
            global_cfg['server.ssl_certificate'] = make_splunkhome_path([ssl_certificate])

        if not os.path.exists(global_cfg['server.ssl_private_key']):
            raise ValueError("%s Not Found" % global_cfg['server.ssl_private_key'])

        if not os.path.exists(global_cfg['server.ssl_certificate']):
            raise ValueError("%s Not Found" % global_cfg['server.ssl_certificate'])

        if global_cfg.get('supportSSLV3Only'):
            global_cfg['server.ssl_v3_only'] = True

        if ssl_ciphers:
            global_cfg['server.ssl_ciphers'] = ssl_ciphers
    else:
        # make sure the secure flag is not set on session cookies if we're not serving over SSL
        global_cfg['tools.sessions.secure'] = False

    # setup cherrypy logging infrastructure
    if global_cfg.has_key('log.access_file'):
        filename = make_absolute(global_cfg['log.access_file'], BASE_LOG_PATH)
        maxsize = int(global_cfg.get('log.access_maxsize', 0))
        maxcount = int(global_cfg.get('log.access_maxfiles', 5))
        if maxsize > 0:
            # rotate ourselves instead of letting cherrypy append forever
            cherrypy.log.access_file = ''
            h = logging.handlers.RotatingFileHandler(filename, 'a', maxsize, maxcount)
            h.setLevel(logging.INFO)
            h.setFormatter(_cplogging.logfmt)
            cherrypy.log.access_log.addHandler(h)
            del global_cfg['log.access_file']
        else:
            global_cfg['log.access_file'] = filename

    if global_cfg.has_key('log.error_file'):
        # we've already committed to web_service.log by this point
        del global_cfg['log.error_file']
    cherrypy.log.error_file = ''
    cherrypy.log.error_log.addHandler(splunk_log_handler)
    if global_cfg.has_key('log.error_maxsize'):
        splunk_log_handler.maxBytes = int(global_cfg['log.error_maxsize'])
        splunk_log_handler.backupCount = int(global_cfg.get('log.error_maxfiles', 5))

    # now that we have somewhere to log, test the ssl keys. - SPL-34126
    # Lousy solution, but python's ssl itself hangs with encrypted keys, so avoid hang by
    # bailing with a message
    # NOTE(review): this checks the raw config value rather than
    # normalizeBoolean(...) as above -- presumably splunk_to_cherry_cfg
    # already normalized booleans; confirm.
    if global_cfg['enableSplunkWebSSL']:
        for cert_file in (global_cfg['server.ssl_private_key'],
                          global_cfg['server.ssl_certificate']):
            if is_encrypted_cert(cert_file):
                logger.error("""Specified cert '%s' is encrypted with a passphrase. SplunkWeb does not support passphrase-encrypted keys at this time. To resolve the problem, decrypt the keys on disk, generate new passphrase-less keys, or disable ssl for SplunkWeb.""" % cert_file)
                raise Exception("Unsupported encrypted cert file.")

    # set login settings
    if global_cfg.get('tools.sessions.storage_type') == 'file':
        global_cfg['tools.sessions.storage_path'] = make_absolute(global_cfg['tools.sessions.storage_path'])

    # SPL-16963: add port number to session key to allow for sessions for multiple
    # instances to run on a single host, without mutually logging each other out.
    global_cfg['tools.sessions.name'] = "session_id_%s" % global_cfg['httpport']

    # set mako template cache directory
    global_cfg.setdefault('mako_cache_path', MAKO_CACHE_PATH)

    root_name = global_cfg.get('root_endpoint', FAILSAFE_ROOT_ENDPOINT).strip('/')

    ctrl = TopController()
    cfg = {'global' : global_cfg}

    # initialize all of the custom endpoints that are registered in the
    # apps
    ctrl.custom.load_handlers()

    # Serve static files if so configured
    if global_cfg.has_key('static_endpoint'):
        mount_static(ctrl, global_cfg, cfg)

    if global_cfg.has_key('testing_endpoint'):
        if (global_cfg.get('static_dir','') == '') :
            logger.warn('testing endpoint configured, but no testing directory. Falling back to ' + FAILSAFE_TESTING_DIR)
        staticdir = make_absolute(global_cfg.get('testing_dir', FAILSAFE_TESTING_DIR), '')
        cfg[global_cfg['testing_endpoint']] = {
            'tools.staticdir.on' : True,
            'tools.staticdir.dir' : staticdir,
            'tools.staticdir.strip_version' : True
        }

    if global_cfg.has_key('rss_endpoint'):
        logger.debug('Checking for shared storage location')
        rssdir = get_rss_parent_dir()
        if len(rssdir) > 0:
            logger.debug('Using shared storage location: %s' % rssdir)
        else:
            rssdir = make_absolute(global_cfg.get('rss_dir', FAILSAFE_RSS_DIR), '')
            logger.debug('No shared storage location configured, using: %s' % rssdir)
        cfg[global_cfg['rss_endpoint']] = {
            'tools.staticdir.on' : True,
            'tools.staticdir.dir' : rssdir,
            'tools.staticdir.strip_version' : False,
            'tools.staticdir.default_ext' : 'xml',
            'error_page.404': make_splunkhome_path([FAILSAFE_STATIC_DIR, 'html', 'rss_404.html'])
        }

    # Modules served statically out of /modules or out of an app's modules dir
    def module_resolver(section, branch, dir):
        from lib.apps import local_apps
        # first part of branch is the module name
        parts = os.path.normpath(branch.strip('/')).replace(os.path.sep, '/').split('/')
        locale = i18n.current_lang(True)
        if not parts:
            return False
        module_path = local_apps.getModulePath(parts[0])
        if module_path:
            fn = os.path.join(module_path, *parts[1:])
            if fn.endswith('.js') and os.path.exists(fn):
                # returns the path to a cached file containing the original js + json translation map
                return i18n.translate_js(fn)
            return fn
        elif parts[0].startswith('modules-') and parts[0].endswith('.js'):
            hash = parts[0].replace('modules-', '').replace('.min.js', '')
            return make_absolute(os.path.join(i18n.CACHE_PATH, '%s-%s-%s.cache' % ('modules.min.js', hash, locale)))
        elif parts[0].startswith('modules-') and parts[0].endswith('.css'):
            return filechain.MODULE_STATIC_CACHE_PATH + os.sep + 'css' + os.sep + parts[0]
        return False

    moddir = make_absolute(global_cfg.get('module_dir', FAILSAFE_MODULE_PATH))
    cfg['/modules'] = {
        'tools.staticdir.strip_version' : True,
        'tools.staticdir.on' : True,
        'tools.staticdir.match' : re.compile(r'.*\.(?!html$|spec$|py$)'), # only files with extensions other than .html, .py and .spec are served
        'tools.staticdir.dir' : moddir,
        'tools.staticdir.resolver' : module_resolver,
        'tools.staticdir.content_types' : {'js' : 'application/javascript'} # correct python's application/x-javascript
    }

    cfg['/'] = {
        'request.dispatch': i18n.I18NDispatcher(),
    }

    # enable gzip + i18n goodness
    if global_cfg.get('enable_gzip', False):
        cfg['/'].update({
            'tools.gzip.on' : True,
            'tools.gzip.mime_types' : ['text/plain', 'text/html', 'text/css', 'application/javascript', 'application/x-javascript'],
        })
    #cfg['/']['tools.gzip.on'] = False

    # Set maximum filesize we can receive (in MB)
    maxsize = global_cfg.get('max_upload_size', DEFAULT_MAX_UPLOAD_SIZE)
    cfg['global']['server.max_request_body_size'] = int(maxsize) * 1024 * 1024

    if global_cfg.get('enable_throttle', False):
        from lib import throttle
        cfg['global'].update({
            'tools.throttle.on' : True,
            'tools.throttle.bandwidth': int(global_cfg.get('throttle_bandwidth', 50)),
            'tools.throttle.latency': int(global_cfg.get('throttle_latency', 100))
        })

    if global_cfg.get('enable_log_runtime', False):
        points = global_cfg.get('enable_log_runtime')
        if points == 'All':
            points = 'on_start_resource,before_request_body,before_handler,before_finalize,on_end_resource,on_end_request'
        if points is True:
            points = 'on_end_resource'
        for point in points.split(','):
            # closure binds `point` per-iteration (avoids late-binding bug)
            def log_closure(point):
                def log():
                    import time
                    starttime = cherrypy.response.time
                    endtime = time.time()
                    delta = (endtime - starttime) * 1000
                    logger.warn('log_runtime point=%s path="%s" start=%f end=%f delta_ms=%.1f' % (point, cherrypy.request.path_info, starttime, endtime, delta))
                return log
            setattr(cherrypy.tools, 'log_'+point, cherrypy.Tool(point, log_closure(point)))
            cfg['/']['tools.log_%s.on' % point] = True

    if global_cfg.get('storm_enabled'):
        from splunk.appserver.mrsparkle.lib.storm import hook_storm_session
        hook_storm_session()

    # setup handler to create and remove the pidfile
    pid_path = make_absolute(global_cfg.get('pid_path', PID_PATH))
    ProcessID(cherrypy.engine, pid_path).subscribe()

    #
    # process splunkd status information
    #
    startup.initVersionInfo()

    # set start time for restart checking
    cfg['global']['start_time'] = time.time()

    # setup global error handling page
    cfg['global']['error_page.default'] = error.handleError

    #
    # TODO: refactor me into locale stuff
    #
    cfg['global']['DISPATCH_TIME_FORMAT'] = '%s.%Q'
    # END

    # Common splunk paths
    cfg['global']['etc_path'] = make_absolute(SPLUNK_ETC_PATH)
    cfg['global']['site_packages_path'] = make_absolute(SPLUNK_SITE_PACKAGES_PATH)
    cfg['global']['mrsparkle_path'] = make_absolute(SPLUNK_MRSPARKLE_PATH)

    listen_on_ipv6 = global_cfg.get('listenOnIPv6')
    socket_host = global_cfg.get('server.socket_host')
    if not socket_host:
        if listen_on_ipv6:
            socket_host = global_cfg['server.socket_host'] = '::'
        else:
            socket_host = global_cfg['server.socket_host'] = '0.0.0.0'
        logger.info("server.socket_host defaulting to %s" % socket_host)

    if ':' in socket_host:
        if not listen_on_ipv6:
            logger.warn('server.socket_host was set to IPv6 address "%s", so ignoring listenOnIPv6 value of "%s"' % (socket_host, listen_on_ipv6))
    else:
        if listen_on_ipv6:
            # message typo fixed: "was to to" -> "was set to"
            logger.warn('server.socket_host was set to IPv4 address "%s", so ignoring listenOnIPv6 values of "%s"' % (socket_host, listen_on_ipv6))

    if socket_host == '::':
        # Start a second server to listen to the IPV6 socket
        if isinstance(listen_on_ipv6, bool) or listen_on_ipv6.lower() != 'only':
            global_cfg['server.socket_host'] = '0.0.0.0'
            from cherrypy import _cpserver
            from cherrypy import _cpwsgi_server
            server2 = _cpserver.Server()
            server2.httpserver = _cpwsgi_server.CPWSGIServer()
            server2.httpserver.bind_addr = ('::', global_cfg['server.socket_port'])
            server2.socket_host = '::'
            server2.socket_port = global_cfg['server.socket_port']
            # mirror the SSL settings onto the secondary server
            for key in ('ssl_private_key', 'ssl_certificate', 'ssl_v3_only', 'ssl_ciphers'):
                if 'server.'+key in global_cfg:
                    setattr(server2, key, global_cfg['server.'+key])
                    setattr(server2.httpserver, key, global_cfg['server.'+key])
            server2.subscribe()

    if root_name:
        # redirect / to the root endpoint
        cherrypy.tree.mount(RootController(), '/', cfg)

    cherrypy.config.update(cfg)

    if global_cfg.get('enable_profile', False):
        from cherrypy.lib import profiler
        cherrypy.tree.graft(
            profiler.make_app(cherrypy.Application(ctrl, '/' + root_name, cfg),
                              path=global_cfg.get('profile_path', '/tmp/profile')),
            '/' + root_name
        )
    else:
        cherrypy.tree.mount(ctrl, '/' + root_name, cfg)

    cherrypy.engine.signal_handler.subscribe()

    # this makes Ctrl-C work when running in nodaemon
    if splunk.clilib.cli_common.isWindows:
        from cherrypy.process import win32
        cherrypy.console_control_handler = win32.ConsoleCtrlHandler(cherrypy.engine)
        cherrypy.engine.console_control_handler.subscribe()

    # log active config
    for k in sorted(cherrypy.config):
        logger.info('CONFIG: %s (%s): %s' % (k, type(cherrypy.config[k]).__name__, cherrypy.config[k]))

    cherrypy.engine.start()

    # clean up caches on init
    filechain.clear_cache()
    i18n.init_js_cache()

    if blocking:
        # this routine that starts this as a windows service will not want us to block here.
        cherrypy.engine.block()
def run(blocking=True):
    """Configure and launch the splunkweb cherrypy server.

    Loads web.conf settings (with command-line overrides), sets up SSL and
    logging, builds the endpoint/static-file configuration, mounts the
    controller tree, and starts the cherrypy engine.

    blocking -- when true, block in cherrypy.engine.block(); the Windows
                service entry point passes False so it can return.
    """
    # get confs
    global_cfg = splunk_to_cherry_cfg('web', 'settings')

    # allow command line arguments to override the configuration
    # eg. --httpport=80
    args = util.args_to_dict()

    # debugging can be turned on from the command line with --debug
    if args.get('debug'):
        del args['debug']
        logger.setLevel(logging.DEBUG)
        for lname, litem in logger.manager.loggerDict.items():
            if not isinstance(litem, logging.PlaceHolder):
                logger.debug("Updating logger=%s to level=DEBUG" % lname)
                litem.setLevel(logging.DEBUG)
        args['js_logger_mode'] = 'Server'
        args['js_no_cache'] = True
    global_cfg.update(args)

    # support SPLUNK_BINDIP backwards compatibly. -- overrides web.conf
    if os.environ.has_key('SPLUNK_BINDIP'):
        global_cfg['server.socket_host'] = os.environ['SPLUNK_BINDIP'].strip()

    global_cfg['server.socket_port'] = global_cfg['httpport']

    if normalizeBoolean(global_cfg.get('enableSplunkWebSSL', False)):
        logger.info('Enabling SSL')
        priv_key_path = str(global_cfg['privKeyPath'])
        ssl_certificate = str(global_cfg['caCertPath'])
        ssl_ciphers = str(global_cfg['cipherSuite'])

        # relative paths are resolved against SPLUNK_HOME
        if os.path.isabs(priv_key_path):
            global_cfg['server.ssl_private_key'] = priv_key_path
        else:
            global_cfg['server.ssl_private_key'] = make_splunkhome_path([priv_key_path])

        if os.path.isabs(ssl_certificate):
            global_cfg['server.ssl_certificate'] = ssl_certificate
        else:
            global_cfg['server.ssl_certificate'] = make_splunkhome_path([ssl_certificate])

        if not os.path.exists(global_cfg['server.ssl_private_key']):
            raise ValueError("%s Not Found" % global_cfg['server.ssl_private_key'])

        if not os.path.exists(global_cfg['server.ssl_certificate']):
            raise ValueError("%s Not Found" % global_cfg['server.ssl_certificate'])

        if global_cfg.get('supportSSLV3Only'):
            global_cfg['server.ssl_v3_only'] = True

        if ssl_ciphers:
            global_cfg['server.ssl_ciphers'] = ssl_ciphers
    else:
        # make sure the secure flag is not set on session cookies if we're not serving over SSL
        global_cfg['tools.sessions.secure'] = False

    # setup cherrypy logging infrastructure
    if global_cfg.has_key('log.access_file'):
        filename = make_absolute(global_cfg['log.access_file'], BASE_LOG_PATH)
        maxsize = int(global_cfg.get('log.access_maxsize', 0))
        maxcount = int(global_cfg.get('log.access_maxfiles', 5))
        if maxsize > 0:
            # rotate ourselves instead of letting cherrypy append forever
            cherrypy.log.access_file = ''
            h = logging.handlers.RotatingFileHandler(filename, 'a', maxsize, maxcount)
            h.setLevel(logging.INFO)
            h.setFormatter(_cplogging.logfmt)
            cherrypy.log.access_log.addHandler(h)
            del global_cfg['log.access_file']
        else:
            global_cfg['log.access_file'] = filename

    if global_cfg.has_key('log.error_file'):
        # we've already committed to web_service.log by this point
        del global_cfg['log.error_file']
    cherrypy.log.error_file = ''
    cherrypy.log.error_log.addHandler(splunk_log_handler)
    if global_cfg.has_key('log.error_maxsize'):
        splunk_log_handler.maxBytes = int(global_cfg['log.error_maxsize'])
        splunk_log_handler.backupCount = int(global_cfg.get('log.error_maxfiles', 5))

    # now that we have somewhere to log, test the ssl keys. - SPL-34126
    # Lousy solution, but python's ssl itself hangs with encrypted keys, so avoid hang by
    # bailing with a message
    # NOTE(review): this checks the raw config value rather than
    # normalizeBoolean(...) as above -- presumably splunk_to_cherry_cfg
    # already normalized booleans; confirm.
    if global_cfg['enableSplunkWebSSL']:
        for cert_file in (global_cfg['server.ssl_private_key'],
                          global_cfg['server.ssl_certificate']):
            if is_encrypted_cert(cert_file):
                logger.error("""Specified cert '%s' is encrypted with a passphrase. SplunkWeb does not support passphrase-encrypted keys at this time. To resolve the problem, decrypt the keys on disk, generate new passphrase-less keys, or disable ssl for SplunkWeb.""" % cert_file)
                raise Exception("Unsupported encrypted cert file.")

    # set login settings
    if global_cfg.get('tools.sessions.storage_type') == 'file':
        global_cfg['tools.sessions.storage_path'] = make_absolute(global_cfg['tools.sessions.storage_path'])

    # SPL-16963: add port number to session key to allow for sessions for multiple
    # instances to run on a single host, without mutually logging each other out.
    global_cfg['tools.sessions.name'] = "session_id_%s" % global_cfg['httpport']

    # set mako template cache directory
    global_cfg.setdefault('mako_cache_path', MAKO_CACHE_PATH)

    root_name = global_cfg.get('root_endpoint', FAILSAFE_ROOT_ENDPOINT).strip('/')

    ctrl = TopController()
    cfg = {'global': global_cfg}

    # initialize all of the custom endpoints that are registered in the
    # apps
    ctrl.custom.load_handlers()

    # Serve static files if so configured
    if global_cfg.has_key('static_endpoint'):
        mount_static(ctrl, global_cfg, cfg)

    if global_cfg.has_key('testing_endpoint'):
        if (global_cfg.get('static_dir', '') == ''):
            logger.warn('testing endpoint configured, but no testing directory. Falling back to ' + FAILSAFE_TESTING_DIR)
        staticdir = make_absolute(global_cfg.get('testing_dir', FAILSAFE_TESTING_DIR), '')
        cfg[global_cfg['testing_endpoint']] = {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': staticdir,
            'tools.staticdir.strip_version': True
        }

    if global_cfg.has_key('rss_endpoint'):
        logger.debug('Checking for shared storage location')
        rssdir = get_rss_parent_dir()
        if len(rssdir) > 0:
            logger.debug('Using shared storage location: %s' % rssdir)
        else:
            rssdir = make_absolute(global_cfg.get('rss_dir', FAILSAFE_RSS_DIR), '')
            logger.debug('No shared storage location configured, using: %s' % rssdir)
        cfg[global_cfg['rss_endpoint']] = {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': rssdir,
            'tools.staticdir.strip_version': False,
            'tools.staticdir.default_ext': 'xml',
            'error_page.404': make_splunkhome_path([FAILSAFE_STATIC_DIR, 'html', 'rss_404.html'])
        }

    # Modules served statically out of /modules or out of an app's modules dir
    def module_resolver(section, branch, dir):
        from lib.apps import local_apps
        # first part of branch is the module name
        parts = os.path.normpath(branch.strip('/')).replace(os.path.sep, '/').split('/')
        locale = i18n.current_lang(True)
        if not parts:
            return False
        module_path = local_apps.getModulePath(parts[0])
        if module_path:
            fn = os.path.join(module_path, *parts[1:])
            if fn.endswith('.js') and os.path.exists(fn):
                # returns the path to a cached file containing the original js + json translation map
                return i18n.translate_js(fn)
            return fn
        elif parts[0].startswith('modules-') and parts[0].endswith('.js'):
            hash = parts[0].replace('modules-', '').replace('.min.js', '')
            return make_absolute(os.path.join(i18n.CACHE_PATH, '%s-%s-%s.cache' % ('modules.min.js', hash, locale)))
        elif parts[0].startswith('modules-') and parts[0].endswith('.css'):
            return filechain.MODULE_STATIC_CACHE_PATH + os.sep + 'css' + os.sep + parts[0]
        return False

    moddir = make_absolute(global_cfg.get('module_dir', FAILSAFE_MODULE_PATH))
    cfg['/modules'] = {
        'tools.staticdir.strip_version': True,
        'tools.staticdir.on': True,
        # only files with extensions other than .html, .py and .spec are served
        'tools.staticdir.match': re.compile(r'.*\.(?!html$|spec$|py$)'),
        'tools.staticdir.dir': moddir,
        'tools.staticdir.resolver': module_resolver,
        # correct python's application/x-javascript
        'tools.staticdir.content_types': {'js': 'application/javascript'}
    }

    cfg['/'] = {
        'request.dispatch': i18n.I18NDispatcher(),
    }

    # enable gzip + i18n goodness
    if global_cfg.get('enable_gzip', False):
        cfg['/'].update({
            'tools.gzip.on': True,
            'tools.gzip.mime_types': ['text/plain', 'text/html', 'text/css', 'application/javascript', 'application/x-javascript'],
        })
    #cfg['/']['tools.gzip.on'] = False

    # Set maximum filesize we can receive (in MB)
    maxsize = global_cfg.get('max_upload_size', DEFAULT_MAX_UPLOAD_SIZE)
    cfg['global']['server.max_request_body_size'] = int(maxsize) * 1024 * 1024

    if global_cfg.get('enable_throttle', False):
        from lib import throttle
        cfg['global'].update({
            'tools.throttle.on': True,
            'tools.throttle.bandwidth': int(global_cfg.get('throttle_bandwidth', 50)),
            'tools.throttle.latency': int(global_cfg.get('throttle_latency', 100))
        })

    if global_cfg.get('enable_log_runtime', False):
        points = global_cfg.get('enable_log_runtime')
        if points == 'All':
            points = 'on_start_resource,before_request_body,before_handler,before_finalize,on_end_resource,on_end_request'
        if points is True:
            points = 'on_end_resource'
        for point in points.split(','):
            # closure binds `point` per-iteration (avoids late-binding bug)
            def log_closure(point):
                def log():
                    import time
                    starttime = cherrypy.response.time
                    endtime = time.time()
                    delta = (endtime - starttime) * 1000
                    logger.warn('log_runtime point=%s path="%s" start=%f end=%f delta_ms=%.1f' % (point, cherrypy.request.path_info, starttime, endtime, delta))
                return log
            setattr(cherrypy.tools, 'log_' + point, cherrypy.Tool(point, log_closure(point)))
            cfg['/']['tools.log_%s.on' % point] = True

    if global_cfg.get('storm_enabled'):
        from splunk.appserver.mrsparkle.lib.storm import hook_storm_session
        hook_storm_session()

    # setup handler to create and remove the pidfile
    pid_path = make_absolute(global_cfg.get('pid_path', PID_PATH))
    ProcessID(cherrypy.engine, pid_path).subscribe()

    #
    # process splunkd status information
    #
    startup.initVersionInfo()

    # set start time for restart checking
    cfg['global']['start_time'] = time.time()

    # setup global error handling page
    cfg['global']['error_page.default'] = error.handleError

    #
    # TODO: refactor me into locale stuff
    #
    cfg['global']['DISPATCH_TIME_FORMAT'] = '%s.%Q'
    # END

    # Common splunk paths
    cfg['global']['etc_path'] = make_absolute(SPLUNK_ETC_PATH)
    cfg['global']['site_packages_path'] = make_absolute(SPLUNK_SITE_PACKAGES_PATH)
    cfg['global']['mrsparkle_path'] = make_absolute(SPLUNK_MRSPARKLE_PATH)

    listen_on_ipv6 = global_cfg.get('listenOnIPv6')
    socket_host = global_cfg.get('server.socket_host')
    if not socket_host:
        if listen_on_ipv6:
            socket_host = global_cfg['server.socket_host'] = '::'
        else:
            socket_host = global_cfg['server.socket_host'] = '0.0.0.0'
        logger.info("server.socket_host defaulting to %s" % socket_host)

    if ':' in socket_host:
        if not listen_on_ipv6:
            logger.warn('server.socket_host was set to IPv6 address "%s", so ignoring listenOnIPv6 value of "%s"' % (socket_host, listen_on_ipv6))
    else:
        if listen_on_ipv6:
            # message typo fixed: "was to to" -> "was set to"
            logger.warn('server.socket_host was set to IPv4 address "%s", so ignoring listenOnIPv6 values of "%s"' % (socket_host, listen_on_ipv6))

    if socket_host == '::':
        # Start a second server to listen to the IPV6 socket
        if isinstance(listen_on_ipv6, bool) or listen_on_ipv6.lower() != 'only':
            global_cfg['server.socket_host'] = '0.0.0.0'
            from cherrypy import _cpserver
            from cherrypy import _cpwsgi_server
            server2 = _cpserver.Server()
            server2.httpserver = _cpwsgi_server.CPWSGIServer()
            server2.httpserver.bind_addr = ('::', global_cfg['server.socket_port'])
            server2.socket_host = '::'
            server2.socket_port = global_cfg['server.socket_port']
            # mirror the SSL settings onto the secondary server
            for key in ('ssl_private_key', 'ssl_certificate', 'ssl_v3_only', 'ssl_ciphers'):
                if 'server.' + key in global_cfg:
                    setattr(server2, key, global_cfg['server.' + key])
                    setattr(server2.httpserver, key, global_cfg['server.' + key])
            server2.subscribe()

    if root_name:
        # redirect / to the root endpoint
        cherrypy.tree.mount(RootController(), '/', cfg)

    cherrypy.config.update(cfg)

    if global_cfg.get('enable_profile', False):
        from cherrypy.lib import profiler
        cherrypy.tree.graft(
            profiler.make_app(cherrypy.Application(ctrl, '/' + root_name, cfg),
                              path=global_cfg.get('profile_path', '/tmp/profile')),
            '/' + root_name)
    else:
        cherrypy.tree.mount(ctrl, '/' + root_name, cfg)

    cherrypy.engine.signal_handler.subscribe()

    # this makes Ctrl-C work when running in nodaemon
    if splunk.clilib.cli_common.isWindows:
        from cherrypy.process import win32
        cherrypy.console_control_handler = win32.ConsoleCtrlHandler(cherrypy.engine)
        cherrypy.engine.console_control_handler.subscribe()

    # log active config
    for k in sorted(cherrypy.config):
        logger.info('CONFIG: %s (%s): %s' % (k, type(cherrypy.config[k]).__name__, cherrypy.config[k]))

    cherrypy.engine.start()

    # clean up caches on init
    filechain.clear_cache()
    i18n.init_js_cache()

    if blocking:
        # this routine that starts this as a windows service will not want us to block here.
        cherrypy.engine.block()