def __init__(self, environ, start_response): cny_log.setupLogging(consoleLevel=logging.INFO, consoleFormat='apache', consoleStream=environ['wsgi.errors']) # gunicorn likes to umask(0) when daemonizing, so put back something # reasonable if that's the case. oldUmask = os.umask(022) if oldUmask != 0: os.umask(oldUmask) self.start_response = start_response try: self.req = self.requestFactory(environ) except: log.exception("Error parsing request:") response = web_exc.HTTPBadRequest() self.iterable = response(environ, start_response) return if self.cfg is None: type(self).cfg = config.UpsrvConfig.load() self.req.cfg = self.cfg try: response = self.handleRequest() except web_exc.HTTPException, exc: response = exc
def main(args):
    """CLI driver: build a template for a trove/kernel pair and poll until done.

    args -- [troveSpec, kernelSpec] or [troveSpec, kernelSpec, workDir];
            workDir defaults to the current directory.
    """
    import time
    from conary import conarycfg
    from conary.conaryclient.cmdline import parseTroveSpec
    setupLogging(consoleLevel=logging.DEBUG, consoleFormat='file')
    if len(args) == 2:
        troveSpec, kernelSpec, workDir = args[0], args[1], '.'
    elif len(args) == 3:
        troveSpec, kernelSpec, workDir = args
    else:
        sys.exit("Usage: %s <troveSpec> <kernelSpec> [<workDir>]"
                % sys.argv[0])
    cfg = conarycfg.ConaryConfiguration(False)
    cfg.configLine('includeConfigFile http://localhost/conaryrc')
    cli = ConaryClient(cfg)
    repos = cli.getRepos()
    # sorted(...)[-1] picks the latest match returned by findTrove.
    troveTup = sorted(repos.findTrove(None, parseTroveSpec(troveSpec)))[-1]
    kernelTup = sorted(repos.findTrove(None, parseTroveSpec(kernelSpec)))[-1]
    generator = TemplateGenerator(troveTup, kernelTup, cfg, workDir)
    # First call kicks off generation; subsequent calls poll for status.
    generator.getTemplate(start=True)
    while True:
        status, path = generator.getTemplate(start=False)
        if status == generator.Status.NOT_FOUND:
            print 'Failed!'
            break
        elif status == generator.Status.DONE:
            print 'Done:', path
            break
        time.sleep(1)
    generator.wait()
def main(args):
    """Entry point for the jobmaster service.

    Parses command-line options, configures logging, and either runs one
    of the cleanup modes, runs in the foreground, or double-forks into a
    daemon that records its pid in cfg.pidFile.

    Returns an integer exit status (or the cleanup method's result).

    Fixes over the previous version: the pid file is written inside a
    ``with`` block so the handle cannot leak if the write fails, and the
    first-fork parent returns an explicit 0 instead of an implicit None.
    """
    parser = optparse.OptionParser()
    parser.add_option('-c', '--config-file', default=config.CONFIG_PATH)
    parser.add_option('-n', '--no-daemon', action='store_true')
    parser.add_option('--clean-mounts', action='store_true',
            help='Clean up stray mount points and logical volumes')
    parser.add_option('--clean-roots', action='store_true',
            help='Clean up old jobslave roots')
    options, args = parser.parse_args(args)
    cfg = config.MasterConfig()
    cfg.read(options.config_file)
    # Cleanup modes always run in the foreground.
    if options.clean_mounts or options.clean_roots:
        options.no_daemon = True
    level = cfg.getLogLevel()
    # Console logging only makes sense when not daemonized.
    setupLogging(logPath=cfg.logPath, fileLevel=level, consoleFormat='file',
            consoleLevel=level if options.no_daemon else None)
    master = JobMaster(cfg)
    if options.clean_mounts:
        return master.clean_mounts()
    elif options.clean_roots:
        return master.clean_roots()
    elif options.no_daemon:
        master.pre_start()
        master.run()
        return 0
    master.pre_start()
    # Double-fork to daemonize
    pid = os.fork()
    if pid:
        # Original process: report success to the caller.
        return 0
    pid = os.fork()
    if pid:
        # Intermediate child exits so the daemon is reparented to init.
        os._exit(0)
    try:
        os.setsid()
        # Detach stdio from the controlling terminal.
        devNull = os.open(os.devnull, os.O_RDWR)
        os.dup2(devNull, sys.stdout.fileno())
        os.dup2(devNull, sys.stderr.fileno())
        os.dup2(devNull, sys.stdin.fileno())
        os.close(devNull)
        # Record the daemon's pid; 'with' guarantees the handle is closed
        # even if the write raises.
        with open(cfg.pidFile, 'w') as fObj:
            fObj.write(str(os.getpid()))
        master.run()
    finally:
        # Best-effort pid file removal, then hard-exit: the daemon must
        # never return into the caller's stack.
        try:
            os.unlink(cfg.pidFile)
        finally:
            os._exit(0)
def test():
    """Run the proxy server in the foreground for interactive debugging.

    Prints the pid so the operator can send SIGUSR1, which drops into an
    epdb remote debugger session.
    """
    import epdb, signal
    print os.getpid()
    def hdlr(signum, sigtb):
        # SIGUSR1 handler: start an epdb debugging server.
        epdb.serve()
    signal.signal(signal.SIGUSR1, hdlr)
    setupLogging(consoleLevel=logging.DEBUG, consoleFormat='file')
    s = ProxyServer(7770)
    try:
        asyncore.loop(use_poll=True)
    except KeyboardInterrupt:
        # Bare print emits a newline so ^C leaves a clean prompt.
        print
def application(environ, start_response):
    """Virtual-host dispatching WSGI app for Conary repositories.

    Determines a host name from (in priority order) the X-Conary-Vhost
    header, the first PATH_INFO component, the Host header, and the
    X-Conary-Servername header, then looks for a matching config file
    under CONARY_VHOST_DIR and delegates to the repository application.
    """
    if not logging.root.handlers:
        cny_log.setupLogging(consoleLevel=logging.INFO,
                consoleFormat='apache',
                consoleStream=environ['wsgi.errors'])
    vhostDir = _getVhostDir(environ)
    if not vhostDir:
        log.error("The CONARY_VHOST_DIR environment variable must be set to "
                "an existing directory")
        start_response('500 Internal Server Error',
                [('Content-Type', 'text/plain')])
        return [ "ERROR: The server is not configured correctly. Check the "
                "server's error logs.\r\n" ]
    pathhost = httphost = repohost = None
    if environ.get('HTTP_X_CONARY_VHOST'):
        pathhost = environ['HTTP_X_CONARY_VHOST']
    elif environ.get('PATH_INFO'):
        path = environ['PATH_INFO'].split('/')
        if path[0] == '' and '.' in path[1]:
            # http://big.server/foo.com/conary/browse
            # Shift the host component from PATH_INFO into SCRIPT_NAME.
            pathhost = path[1]
            environ['SCRIPT_NAME'] += '/'.join(path[:2])
            environ['PATH_INFO'] = '/' + '/'.join(path[2:])
    if not pathhost:
        # http://repo.hostname/conary/browse
        httphost = environ.get('HTTP_HOST', '').split(':')[0]
        if not httphost:
            start_response('400 Bad Request',
                    [('Content-Type', 'text/plain')])
            return ["ERROR: No server name was supplied\r\n"]
    # repositoryMap repo.hostname http://big.server/conary/
    repohost = environ.get('HTTP_X_CONARY_SERVERNAME', '')
    names = [x for x in [pathhost, httphost, repohost] if x]
    for var in names:
        # Reject path traversal attempts in the header-derived name.
        if '..' in var or '/' in var or os.path.sep in var:
            start_response('400 Bad Request',
                    [('Content-Type', 'text/plain')])
            return ["ERROR: Illegal header value\r\n"]
        if var:
            path = os.path.join(vhostDir, var)
            if os.path.isfile(path):
                break
    else:
        # No candidate matched a config file in vhostDir.
        log.error("vhost path %s not found", path)
        start_response('404 Not Found', [('Content-Type', 'text/plain')])
        names = ' or '.join(names)
        return ["ERROR: No server named %s exists here\r\n" % names]
    environ['conary.netrepos.config_file'] = path
    return wsgi_hooks.makeApp({})(environ, start_response)
def application(environ, start_response):
    """Virtual-host dispatching WSGI app for Conary repositories.

    Resolves a repository host name from request headers / path, maps it
    to a config file under CONARY_VHOST_DIR, and hands the request to the
    repository application built by wsgi_hooks.makeApp.
    """
    if not logging.root.handlers:
        cny_log.setupLogging(consoleLevel=logging.INFO,
                consoleFormat='apache',
                consoleStream=environ['wsgi.errors'])
    vhostDir = _getVhostDir(environ)
    if not vhostDir:
        log.error("The CONARY_VHOST_DIR environment variable must be set to "
                "an existing directory")
        start_response('500 Internal Server Error',
                [('Content-Type', 'text/plain')])
        return ["ERROR: The server is not configured correctly. Check the "
                "server's error logs.\r\n"]
    pathhost = httphost = repohost = None
    if environ.get('HTTP_X_CONARY_VHOST'):
        pathhost = environ['HTTP_X_CONARY_VHOST']
    elif environ.get('PATH_INFO'):
        path = environ['PATH_INFO'].split('/')
        if path[0] == '' and '.' in path[1]:
            # http://big.server/foo.com/conary/browse
            # Move the leading host component out of PATH_INFO.
            pathhost = path[1]
            environ['SCRIPT_NAME'] += '/'.join(path[:2])
            environ['PATH_INFO'] = '/' + '/'.join(path[2:])
    if not pathhost:
        # http://repo.hostname/conary/browse
        httphost = environ.get('HTTP_HOST', '').split(':')[0]
        if not httphost:
            start_response('400 Bad Request',
                    [('Content-Type', 'text/plain')])
            return ["ERROR: No server name was supplied\r\n"]
    # repositoryMap repo.hostname http://big.server/conary/
    repohost = environ.get('HTTP_X_CONARY_SERVERNAME', '')
    names = [x for x in [pathhost, httphost, repohost] if x]
    for var in names:
        # Guard against traversal via header-supplied names.
        if '..' in var or '/' in var or os.path.sep in var:
            start_response('400 Bad Request',
                    [('Content-Type', 'text/plain')])
            return ["ERROR: Illegal header value\r\n"]
        if var:
            path = os.path.join(vhostDir, var)
            if os.path.isfile(path):
                break
    else:
        # for/else: reached only when no candidate produced a config file.
        log.error("vhost path %s not found", path)
        start_response('404 Not Found', [('Content-Type', 'text/plain')])
        names = ' or '.join(names)
        return ["ERROR: No server named %s exists here\r\n" % names]
    environ['conary.netrepos.config_file'] = path
    return wsgi_hooks.makeApp({})(environ, start_response)
def configureLogging(self, logFile, verbosity):
    """Set up file and console logging for the 'forester' logger.

    A logFile with no directory component indicates the initial
    configuration pass, in which case logging is left untouched.
    Verbosity 0 logs warnings, 1 logs info, anything higher logs debug.
    """
    parentDir = os.path.dirname(logFile)
    if not parentDir:
        # Initial configuration; don't even bother
        return
    conary_util.mkdirChain(parentDir)
    # Map verbosity to a level; anything outside 0/1 gets full debug.
    level = {0: logging.WARN, 1: logging.INFO}.get(verbosity, logging.DEBUG)
    cny_log.setupLogging(
        logPath=logFile,
        consoleLevel=level,
        fileLevel=level,
        logger='forester',
    )
def configureLogging(self, logFile, debug, quiet):
    """Set up apache-format logging for the 'client' logger.

    debug wins over quiet; quiet lowers only the console level while the
    file keeps informational messages.
    """
    if debug:
        levels = (logging.DEBUG, logging.DEBUG)
    elif quiet:
        # Console shows errors only, but the file stays informative.
        levels = (logging.ERROR, logging.INFO)
    else:
        levels = (logging.INFO, logging.INFO)
    consoleLevel, fileLevel = levels
    cny_log.setupLogging(
        logPath=logFile,
        consoleLevel=consoleLevel,
        consoleFormat='apache',
        fileLevel=fileLevel,
        fileFormat='apache',
        logger='client',
    )
def configureLogging(self, logFile, debug, quiet):
    """Set up apache-format logging for the 'spanner' logger.

    debug takes precedence over quiet; quiet silences the console down
    to errors while the log file still records informational messages.
    """
    if debug:
        consoleLevel = fileLevel = logging.DEBUG
    elif quiet:
        consoleLevel = logging.ERROR
        fileLevel = logging.INFO
    else:
        consoleLevel = fileLevel = logging.INFO
    cny_log.setupLogging(logPath=logFile,
            consoleLevel=consoleLevel,
            consoleFormat='apache',
            fileLevel=fileLevel,
            fileFormat='apache',
            logger='spanner')
def _main(argv, MainClass):
    """
    Python hook for starting rbuild from the command line.
    @param argv: standard argument vector
    @param MainClass: class whose main() implements the command
    @return: integer exit status (0 on success)
    """
    if argv is None:
        argv = sys.argv
    #pylint: disable-msg=E0701
    # pylint complains about except clauses here because we sometimes
    # redefine debuggerException
    debuggerException = Exception
    try:
        argv = list(argv)
        debugAll = '--debug-all' in argv
        if debugAll:
            log.setupLogging(consoleLevel=logging.DEBUG)
            argv.remove('--debug-all')
        else:
            # Without --debug-all, only internal errors reach the debugger.
            debuggerException = errors.JButlerInternalError
        sys.excepthook = errors.genExcepthook(debug=debugAll,
                debugCtrlC=debugAll)
        rc = MainClass().main(argv, debuggerException=debuggerException)
        if rc is None:
            return 0
        return rc
    except debuggerException:
        # Let the excepthook/debugger deal with it.
        raise
    except IOError as e:
        # allow broken pipe to exit
        # (EPIPE falls through to the final 'return 0' below)
        if e.errno != errno.EPIPE:
            log.error(e)
            return 1
    except KeyboardInterrupt:
        return 1
    except Exception as e:
        log.error(e)
        return 1
    return 0
def __call__(self, environ, start_response):
    """WSGI callable: build a request, dispatch it, and normalize errors.

    Returns either the result of calling a webob-style response object or
    a plain WSGI iterable, depending on what handleRequest produced.
    """
    if not logging.root.handlers:
        cny_log.setupLogging(consoleLevel=logging.INFO,
                consoleFormat='apache',
                consoleStream=environ['wsgi.errors'])
    # gunicorn likes to umask(0) when daemonizing, so put back something
    # reasonable if that's the case.
    oldUmask = os.umask(022)
    if oldUmask != 0:
        os.umask(oldUmask)
    environ.update(self.envOverrides)
    request = self.requestFactory(environ)
    try:
        response = self.handleRequest(request, start_response)
        if callable(response):
            # Looks like a webob response
            return response(environ, start_response)
        else:
            # Looks like a vanilla WSGI iterable
            return response
    except:
        # Capture exc_info here so handleError can render/log it.
        exc_info = sys.exc_info()
        return self.handleError(request, exc_info, start_response)
def main(args=sys.argv[1:]):
    """Analyze a tree of bob plan files.

    Modes (at least one required):
      --scm            print a map of plan files to the SCM paths they use
      --graph          print a provider -> requirers dependency graph
      --required-hosts print, per repository host, the items required there
    """
    cny_log.setupLogging(consoleLevel=logging.INFO)
    parser = optparse.OptionParser(
        usage='%prog {--graph,--required-hosts,--scm} root',
        version="%prog " + bob_version.version
    )
    parser.add_option('--graph', action='store_true')
    parser.add_option('--required-hosts', action='store_true')
    parser.add_option('--scm', action='store_true')
    options, args = parser.parse_args(args)
    if len(args) != 1 or not (options.graph or options.required_hosts
            or options.scm):
        parser.error('wrong arguments')
    root = os.path.abspath(args[0])
    # Collect a list of bob plans
    bobfiles = set()
    for dirpath, dirnames, filenames in os.walk(root):
        # Sort in place so the walk (and output) is deterministic.
        dirnames.sort()
        filenames.sort()
        reldir = dirpath[len(root)+1:]
        for filename in filenames:
            if filename.endswith('.bob'):
                relpath = os.path.join(reldir, filename)
                bobfiles.add(relpath)
    if options.scm:
        watchMap = {}
        for plan in bobfiles:
            cfg = config.openPlan(os.path.join(root, plan))
            aliases = {}
            watchPaths = {}
            # Collect 'wms' SCM aliases, expanding plan macros.
            for name, value in cfg.scm.items():
                name %= cfg.macros
                value %= cfg.macros
                kind, url = value.split()[:2]
                if kind != 'wms':
                    continue
                aliases[name] = url
            # Record which paths each target watches per SCM alias.
            for target in cfg.target:
                sec = cfg.getSection('target:' + target)
                if sec.sourceTree:
                    name, path = sec.sourceTree.split(None, 1)
                    name %= cfg.macros
                    path %= cfg.macros
                    watchPaths.setdefault(name, set()).add(path)
                if sec.scm:
                    # Empty path means "watch the whole tree".
                    name = sec.scm % cfg.macros
                    watchPaths.setdefault(name, set()).add('')
            for alias, paths in watchPaths.items():
                if alias not in aliases:
                    continue
                url = aliases[alias]
                if '' in paths:
                    # Whole-tree watch subsumes any specific paths.
                    paths = set([''])
                watchList = watchMap.setdefault(plan, set())
                for path in paths:
                    watchList.add((url, path))
        print "# Map of plan files to SCM paths they consume"
        print "scm_deps = ",
        pprint.pprint(watchMap)
        if not options.graph and not options.required_hosts:
            sys.exit(0)
    recipeDir = tempfile.mkdtemp(prefix='bob-recipes-')
    pluginMgr = bobmain.getPluginManager()
    pool = multiprocessing.Pool(processes=4)
    try:
        # First pass: mangle and dump all the recipes so that loadSuperClass()
        # can work without actually committing anything.
        ok = pool.map(dump_recipes,
                [(root, pluginMgr, recipeDir, x) for x in bobfiles])
        if False in ok:
            sys.exit("Failed to load recipes")
        # Second pass: make provides and requires out of the bob plan, recipe
        # PackageSpecs, and group recipe inputs.
        provides = {}
        requires = {}
        results = pool.map(_analyze_plan,
                [(root, pluginMgr, recipeDir, x) for x in bobfiles])
        if None in results:
            sys.exit("Failed to analyze recipes")
        for plan_requires, plan_provides in results:
            for key, value in plan_requires.iteritems():
                requires.setdefault(key, set()).update(value)
            for key, value in plan_provides.iteritems():
                provides.setdefault(key, set()).update(value)
        pool.close()
    finally:
        util.rmtree(recipeDir)
    if options.graph:
        # Make edges out of any provided thing. Requires that don't match any
        # provider are discarded, since they are outside the analyzed set.
        edges = {}
        for item, providers in provides.iteritems():
            requirers = requires.get(item, set())
            for provider in providers:
                edges.setdefault(provider, set()).update(requirers)
        # Remove edges that are made entirely redundant by a longer path.
        edges_trimmed = {}
        for provider, requirers in edges.iteritems():
            requirers.discard(provider)
            requirers = dedupe(requirers, edges)
            edges_trimmed[provider] = requirers
        print '# map of providers to the set of requirers'
        print 'dep_graph = ',
        pprint.pprint(edges_trimmed)
    if options.required_hosts:
        mapping = {}
        for item, requirers in requires.iteritems():
            # Items must look like name=label; skip anything else.
            if item.count('=') != 1:
                print "Doesn't look like a trovespec:", item
                continue
            name, version = item.split('=')
            if version.count('@') != 1:
                print "Doesn't look like a trovespec:", item
                continue
            host = version.split('@')[0]
            if host.count('/') == 1 and host[0] == '/':
                host = host[1:]
            mapping.setdefault(host, {})[item] = requirers
        for host, items in sorted(mapping.items()):
            print host
            for item, requirers in sorted(items.items()):
                # Show one representative requirer per item.
                print ' ', item, '\t', sorted(requirers)[0]
def main(args=sys.argv[1:]):
    """Analyze a tree of bob plan files (variant without --version support).

    Modes (at least one required):
      --scm            print a map of plan files to the SCM paths they use
      --graph          print a provider -> requirers dependency graph
      --required-hosts print, per repository host, the items required there
    """
    cny_log.setupLogging(consoleLevel=logging.INFO)
    parser = optparse.OptionParser(
            usage='%prog {--graph,--required-hosts,--scm} root')
    parser.add_option('--graph', action='store_true')
    parser.add_option('--required-hosts', action='store_true')
    parser.add_option('--scm', action='store_true')
    options, args = parser.parse_args(args)
    if len(args) != 1 or not (options.graph or options.required_hosts
            or options.scm):
        parser.error('wrong arguments')
    root = os.path.abspath(args[0])
    # Collect a list of bob plans
    bobfiles = set()
    for dirpath, dirnames, filenames in os.walk(root):
        # In-place sort keeps the walk order deterministic.
        dirnames.sort()
        filenames.sort()
        reldir = dirpath[len(root)+1:]
        for filename in filenames:
            if filename.endswith('.bob'):
                relpath = os.path.join(reldir, filename)
                bobfiles.add(relpath)
    if options.scm:
        watchMap = {}
        for plan in bobfiles:
            cfg = config.openPlan(os.path.join(root, plan))
            aliases = {}
            watchPaths = {}
            # Collect 'wms' SCM aliases, expanding plan macros.
            for name, value in cfg.scm.items():
                name %= cfg.macros
                value %= cfg.macros
                kind, url = value.split()[:2]
                if kind != 'wms':
                    continue
                aliases[name] = url
            # Record which paths each target watches per SCM alias.
            for target in cfg.target:
                sec = cfg.getSection('target:' + target)
                if sec.sourceTree:
                    name, path = sec.sourceTree.split(None, 1)
                    name %= cfg.macros
                    path %= cfg.macros
                    watchPaths.setdefault(name, set()).add(path)
                if sec.scm:
                    # Empty path means "watch the whole tree".
                    name = sec.scm % cfg.macros
                    watchPaths.setdefault(name, set()).add('')
            for alias, paths in watchPaths.items():
                if alias not in aliases:
                    continue
                url = aliases[alias]
                if '' in paths:
                    # Whole-tree watch subsumes specific paths.
                    paths = set([''])
                watchList = watchMap.setdefault(plan, set())
                for path in paths:
                    watchList.add((url, path))
        print "# Map of plan files to SCM paths they consume"
        print "scm_deps = ",
        pprint.pprint(watchMap)
        if not options.graph and not options.required_hosts:
            sys.exit(0)
    recipeDir = tempfile.mkdtemp(prefix='bob-recipes-')
    pluginMgr = bobmain.getPluginManager()
    pool = multiprocessing.Pool(processes=4)
    try:
        # First pass: mangle and dump all the recipes so that loadSuperClass()
        # can work without actually committing anything.
        ok = pool.map(dump_recipes,
                [(root, pluginMgr, recipeDir, x) for x in bobfiles])
        if False in ok:
            sys.exit("Failed to load recipes")
        # Second pass: make provides and requires out of the bob plan, recipe
        # PackageSpecs, and group recipe inputs.
        provides = {}
        requires = {}
        results = pool.map(_analyze_plan,
                [(root, pluginMgr, recipeDir, x) for x in bobfiles])
        if None in results:
            sys.exit("Failed to analyze recipes")
        for plan_requires, plan_provides in results:
            for key, value in plan_requires.iteritems():
                requires.setdefault(key, set()).update(value)
            for key, value in plan_provides.iteritems():
                provides.setdefault(key, set()).update(value)
        pool.close()
    finally:
        util.rmtree(recipeDir)
    if options.graph:
        # Make edges out of any provided thing. Requires that don't match any
        # provider are discarded, since they are outside the analyzed set.
        edges = {}
        for item, providers in provides.iteritems():
            requirers = requires.get(item, set())
            for provider in providers:
                edges.setdefault(provider, set()).update(requirers)
        # Remove edges that are made entirely redundant by a longer path.
        edges_trimmed = {}
        for provider, requirers in edges.iteritems():
            requirers.discard(provider)
            requirers = dedupe(requirers, edges)
            edges_trimmed[provider] = requirers
        print '# map of providers to the set of requirers'
        print 'dep_graph = ',
        pprint.pprint(edges_trimmed)
    if options.required_hosts:
        mapping = {}
        for item, requirers in requires.iteritems():
            # Items must look like name=label; skip anything else.
            if item.count('=') != 1:
                print "Doesn't look like a trovespec:", item
                continue
            name, version = item.split('=')
            if version.count('@') != 1:
                print "Doesn't look like a trovespec:", item
                continue
            host = version.split('@')[0]
            if host.count('/') == 1 and host[0] == '/':
                host = host[1:]
            mapping.setdefault(host, {})[item] = requirers
        for host, items in sorted(mapping.items()):
            print host
            for item, requirers in sorted(items.items()):
                # Show one representative requirer per item.
                print ' ', item, '\t', sorted(requirers)[0]
def _handler(req):
    """mod_python request handler for a Conary repository or proxy.

    Builds (and caches, keyed on the config file name) the repository /
    proxy / REST handler triple, then dispatches on the HTTP method.
    """
    log.setupLogging(consoleLevel=logging.INFO, consoleFormat='apache')
    # Keep CONARY_LOG entries from being double-logged
    log.logger.handlers = []
    repName = req.filename
    if repName in repositories:
        repServer, proxyServer, restHandler = repositories[repName]
    else:
        cfg = netserver.ServerConfig()
        cfg.read(req.filename)
        # Throw away any subdir portion.
        if cfg.baseUri:
            baseUri = cfg.baseUri
        else:
            baseUri = req.uri[:-len(req.path_info)] + '/'
        # protocol/port are filled in later via %-formatting.
        urlBase = "%%(protocol)s://%s:%%(port)d" % \
            (req.server.server_hostname) + baseUri
        if not cfg.repositoryDB and not cfg.proxyContentsDir:
            log.error("repositoryDB or proxyContentsDir is required in %s"
                    % req.filename)
            return apache.HTTP_INTERNAL_SERVER_ERROR
        elif cfg.repositoryDB and cfg.proxyContentsDir:
            log.error(
                "only one of repositoryDB or proxyContentsDir may be specified "
                "in %s" % req.filename)
            return apache.HTTP_INTERNAL_SERVER_ERROR
        if cfg.repositoryDB:
            if not cfg.contentsDir:
                log.error("contentsDir is required in %s" % req.filename)
                return apache.HTTP_INTERNAL_SERVER_ERROR
            elif not cfg.serverName:
                log.error("serverName is required in %s" % req.filename)
                return apache.HTTP_INTERNAL_SERVER_ERROR
        # NOTE: this logs but does not abort the request.
        if os.path.realpath(cfg.tmpDir) != cfg.tmpDir:
            log.error("tmpDir cannot include symbolic links")
        if cfg.closed:
            # Closed repository
            repServer = netserver.ClosedRepositoryServer(cfg)
            proxyServer = proxy.SimpleRepositoryFilter(cfg, urlBase, repServer)
            restHandler = None
        elif cfg.proxyContentsDir:
            # Caching proxy
            repServer = None
            proxyServer = proxy.ProxyRepositoryServer(cfg, urlBase)
            restHandler = None
        else:
            # Full repository with changeset cache
            repServer = netserver.NetworkRepositoryServer(cfg, urlBase)
            proxyServer = proxy.SimpleRepositoryFilter(cfg, urlBase, repServer)
            if cresthooks and cfg.baseUri:
                restUri = cfg.baseUri + '/api'
                restHandler = cresthooks.ApacheHandler(restUri, repServer)
            else:
                restHandler = None
        repositories[repName] = repServer, proxyServer, restHandler
    port = req.connection.local_addr[1]
    # newer versions of mod_python provide a req.is_https() method
    secure = (req.subprocess_env.get('HTTPS', 'off').lower() == 'on')
    method = req.method.upper()
    try:
        try:
            # reopen database connections early to make sure crest gets a
            # working database connection
            if repServer:
                repServer.reopen()
            if method == "POST":
                return post(port, secure, proxyServer, req,
                        repServer=repServer)
            elif method == "GET":
                return get(port, secure, proxyServer, req, restHandler,
                        repServer=repServer)
            elif method == "PUT":
                return putFile(port, secure, proxyServer, req)
            else:
                return apache.HTTP_METHOD_NOT_ALLOWED
        finally:
            # Free temporary resources used by the repserver
            # e.g. pooled DB connections.
            if repServer:
                repServer.reset()
    except apache.SERVER_RETURN:
        # if the exception was an apache server return code,
        # re-raise it and let mod_python handle it.
        raise
    except IOError, e:
        # ignore when the client hangs up on us
        if str(e).endswith('client closed connection.'):
            pass
        else:
            raise
def main():
    """Schema migration regression harness.

    Dumps three schemas into old.sql / migrated.sql / fresh.sql -- the
    starting schema, the starting schema after migration, and a freshly
    created schema -- then diffs migrated vs. fresh and prints
    SUCCESS/FAILURE. Start/end points are either a dump file, a mercurial
    revision, or '.' for the working copy.
    """
    log.setupLogging(consoleLevel=logging.DEBUG, consoleFormat='file')
    start, end = sys.argv[1:]
    if os.path.exists(start):
        startrev = None
        if end == '.':
            # file -> working copy
            endrev = None
        else:
            # file -> revision
            endrev = hgresolve(end)
    else:
        # revision -> revision
        startrev = hgresolve(start)
        endrev = hgresolve(end)
    # Refuse to check out revisions over uncommitted changes.
    if (startrev or endrev) and os.system("hg id |grep -q +") == 0:
        sys.exit("working copy is not clean, commit or qrefresh first")
    workdir = None
    workdir = tempfile.mkdtemp()
    os.chmod(workdir, 0755)
    if os.getuid() == 0:
        # Make the scratch dir accessible to the unprivileged DB user.
        os.chown(workdir, 99, 99)
    server = Postgres(workdir)
    try:
        server.start()
        # Initialize the old version
        migrated_db = createdb(server)
        if startrev:
            if os.system("hg up -C %s 1>&2" % startrev):
                sys.exit("hg failed")
            print 'Migrating from:'
            sys.stdout.flush()
            os.system("hg parents")
        else:
            restoredb(server, migrated_db, start)
            print 'Migrating from file', start
        migratedb(server, migrated_db)
        dumpdb(server, migrated_db, 'old.sql')
        with open('old.sql', 'a') as f:
            print >> f, '-- Generated on %s from revision %s' % (
                    time.strftime('%F %T %z'), startrev or '<unknown>')
        # Migrate to the new version
        if endrev:
            if os.system("hg up -C %s 1>&2" % endrev):
                sys.exit("hg failed")
            print 'Migrating to:'
            sys.stdout.flush()
            os.system("hg parents")
        else:
            print 'Migrating to working copy'
        migratedb(server, migrated_db)
        dumpdb(server, migrated_db, 'migrated.sql')
        with open('migrated.sql', 'a') as f:
            print >> f, ('-- Generated on %s by migrating from revision %s '
                    'to revision %s' % (time.strftime('%F %T %z'),
                        startrev or '<unknown>', endrev or '<unknown>'))
        # Initialize a fresh copy of the new version
        fresh_db = createdb(server)
        migratedb(server, fresh_db)
        dumpdb(server, fresh_db, 'fresh.sql')
        with open('fresh.sql', 'a') as f:
            print >> f, '-- Generated on %s from revision %s' % (
                    time.strftime('%F %T %z'), endrev or '<unknown>')
        # Compare
        print
        print
        print 'Comparison result:'
        import explodeschema
        result = explodeschema.diff(server.connectPsyco(migrated_db),
                server.connectPsyco(fresh_db))
    finally:
        # Always tear down the scratch server and its directory.
        server.kill()
        util.rmtree(workdir)
    print
    if result:
        print 'FAILURE'
    else:
        print 'SUCCESS'
def _handler(req):
    """mod_python request handler for a Conary repository or proxy.

    Builds (and caches, keyed on the config file name) the repository /
    proxy / REST handler triple, then dispatches on the HTTP method.
    """
    log.setupLogging(consoleLevel=logging.INFO, consoleFormat='apache')
    # Keep CONARY_LOG entries from being double-logged
    log.logger.handlers = []
    repName = req.filename
    if repName in repositories:
        repServer, proxyServer, restHandler = repositories[repName]
    else:
        cfg = netserver.ServerConfig()
        cfg.read(req.filename)
        # Throw away any subdir portion.
        if cfg.baseUri:
            baseUri = cfg.baseUri
        else:
            baseUri = req.uri[:-len(req.path_info)] + '/'
        # protocol/port are filled in later via %-formatting.
        urlBase = "%%(protocol)s://%s:%%(port)d" % \
            (req.server.server_hostname) + baseUri
        if not cfg.repositoryDB and not cfg.proxyContentsDir:
            log.error("repositoryDB or proxyContentsDir is required in %s"
                    % req.filename)
            return apache.HTTP_INTERNAL_SERVER_ERROR
        elif cfg.repositoryDB and cfg.proxyContentsDir:
            log.error("only one of repositoryDB or proxyContentsDir may be specified "
                    "in %s" % req.filename)
            return apache.HTTP_INTERNAL_SERVER_ERROR
        if cfg.repositoryDB:
            if not cfg.contentsDir:
                log.error("contentsDir is required in %s" % req.filename)
                return apache.HTTP_INTERNAL_SERVER_ERROR
            elif not cfg.serverName:
                log.error("serverName is required in %s" % req.filename)
                return apache.HTTP_INTERNAL_SERVER_ERROR
        # NOTE: this logs but does not abort the request.
        if os.path.realpath(cfg.tmpDir) != cfg.tmpDir:
            log.error("tmpDir cannot include symbolic links")
        if cfg.closed:
            # Closed repository
            repServer = netserver.ClosedRepositoryServer(cfg)
            proxyServer = proxy.SimpleRepositoryFilter(cfg, urlBase, repServer)
            restHandler = None
        elif cfg.proxyContentsDir:
            # Caching proxy
            repServer = None
            proxyServer = proxy.ProxyRepositoryServer(cfg, urlBase)
            restHandler = None
        else:
            # Full repository with changeset cache
            repServer = netserver.NetworkRepositoryServer(cfg, urlBase)
            proxyServer = proxy.SimpleRepositoryFilter(cfg, urlBase, repServer)
            if cresthooks and cfg.baseUri:
                restUri = cfg.baseUri + '/api'
                restHandler = cresthooks.ApacheHandler(restUri, repServer)
            else:
                restHandler = None
        repositories[repName] = repServer, proxyServer, restHandler
    port = req.connection.local_addr[1]
    # newer versions of mod_python provide a req.is_https() method
    secure = (req.subprocess_env.get('HTTPS', 'off').lower() == 'on')
    method = req.method.upper()
    try:
        try:
            # reopen database connections early to make sure crest gets a
            # working database connection
            if repServer:
                repServer.reopen()
            if method == "POST":
                return post(port, secure, proxyServer, req,
                        repServer=repServer)
            elif method == "GET":
                return get(port, secure, proxyServer, req, restHandler,
                        repServer=repServer)
            elif method == "PUT":
                return putFile(port, secure, proxyServer, req)
            else:
                return apache.HTTP_METHOD_NOT_ALLOWED
        finally:
            # Free temporary resources used by the repserver
            # e.g. pooled DB connections.
            if repServer:
                repServer.reset()
    except apache.SERVER_RETURN:
        # if the exception was an apache server return code,
        # re-raise it and let mod_python handle it.
        raise
    except IOError, e:
        # ignore when the client hangs up on us
        if str(e).endswith('client closed connection.'):
            pass
        else:
            raise
def paster_main(global_config, **settings):
    """Wrapper to enable "paster serve": configure logging, build the app."""
    cny_log.setupLogging(
        consoleLevel=logging.INFO,
        consoleFormat='apache',
    )
    app = makeApp(settings)
    return app