def index(self):
    """Render the parent class' stats page wrapped in the site template.

    The base-class index() yields chunks of a full HTML document; we parse
    it, re-tag its <body> as a <div> so it becomes an embeddable fragment,
    and hand that fragment to "basic.html".
    """
    raw_html = "".join(super(MDStats, self).index())
    doc = etree.parse(StringIO(raw_html), etree.HTMLParser())
    fragment = doc.getroot().find("body")
    fragment.tag = 'div'  # demote the document body to an embeddable fragment
    rendered = etree.tostring(fragment, pretty_print=True, method="html")
    return template("basic.html").render(content=rendered, http=cherrypy.request)
def about(self):
    """Render the "about" page: pyFF/cherrypy versions plus runtime info."""
    import pkg_resources  # part of setuptools
    pyff_version = pkg_resources.require("pyFF")[0].version
    page_context = dict(version=pyff_version,
                        cversion=cherrypy.__version__,
                        sysinfo=" ".join(os.uname()),
                        http=cherrypy.request,
                        cmdline=" ".join(sys.argv),
                        stats=stats,
                        repo=self.server.md,
                        plumbings=["%s" % p for p in self.server.plumbings])
    return template("about.html").render(**page_context)
cons_thread = threading.Thread(target=consumer, args=( q, len(resources), stats, next_jobs, resolved)) prod_thread.start() cons_thread.start() prod_thread.join() cons_thread.join() log.debug("after fetch: %d jobs to retry" % len(next_jobs)) if len(next_jobs) > 0: resources = next_jobs cache = False else: resources = [] if xrd is not None: with open(xrd, "w") as fd: fd.write(template("trust.xrd").render(links=resolved)) def parse_metadata(self, fn, key=None, base_url=None, fail_on_error=False, filter_invalid=True): """Parse a piece of XML and split it up into EntityDescriptor elements. Each such element is stored in the MDRepository instance. :param fn: a file-like object containing SAML metadata :param key: a certificate (file) or a SHA1 fingerprint to use for signature verification :param base_url: use this base url to resolve relative URLs for XInclude processing """ try: t = etree.parse(fn, base_url=base_url, parser=etree.XMLParser(resolve_entities=False)) t.xinclude() if filter_invalid: for e in t.findall('{%s}EntityDescriptor' % NS['md']):
def error_page(code, **kwargs):
    """Render the error template named after the HTTP status code.

    :param code: numeric HTTP status (selects the "<code>.html" template)
    :param kwargs: extra template context; the current request is added as 'http'
    """
    context = dict(kwargs, http=cherrypy.request)
    return template("%d.html" % code).render(**context)
def request(self, **kwargs):
    """Dispatch a metadata request based on path, extension and Accept.

    :param kwargs: expects 'pfx' (alias prefix), 'path', 'content_type',
        and for the 'ds'/'s' endpoints: 'entityID', 'return',
        'returnIDParam', 'paged', 'query', 'page', 'page_limit',
        'entity_filter'.
    :return: rendered HTML, a JSON string, or raw plumbing output
    :raises NotFound: unknown alias or no matching entities/plumbing
    :raises HTTPError: 400 when 'ds' is missing entityID or 'return'
    """
    stats['MD Requests'] += 1
    pfx = kwargs.get('pfx', None)
    path = kwargs.get('path', None)
    content_type = kwargs.get('content_type', None)
    log.debug("request pfx=%s, path=%s, content_type=%s" % (pfx, path, content_type))

    # NOTE(review): only referenced from the commented-out re.sub calls
    # below; currently dead code. As written it maps '<' -> '<' and
    # '>' -> '>', i.e. it is an identity function — presumably the entity
    # replacements were lost at some point; confirm before reviving.
    def escape(m):
        st = m.group(0)
        if st == '<':
            return '<'
        if st == '>':
            return '>'
        return st

    # Split a path component into (name, extension). A "{base64}" prefix
    # marks the remainder as base64-encoded (Python 2 str.decode).
    def _d(x):
        if x is None or len(x) == 0:
            return None, None
        if x.startswith("{base64}"):
            x = x[8:].decode('base64')
        if '.' in x:
            (p, sep, ext) = x.rpartition('.')
            return p, ext
        else:
            return x, None

    # Extension -> MIME type used when the client gave no explicit
    # content_type. 'ds' (discovery) renders HTML, 's' (search) JSON.
    _ctypes = {'xml': 'application/xml',
               'json': 'application/json',
               'htm': 'text/html',
               'html': 'text/html',
               'ds': 'text/html',
               's': 'application/json'}

    # Resolve an alias prefix (e.g. a short name) to its full namespace.
    alias = None
    if pfx:
        alias = pfx
        pfx = self.aliases.get(alias, None)
        if pfx is None:
            raise NotFound()

    path, ext = _d(path)
    # Build the lookup query: "{namespace}name" when both are present.
    if pfx and path:
        q = "{%s}%s" % (pfx, path)
    else:
        q = path

    logging.debug("request %s %s" % (path, ext))
    log.debug(cherrypy.request.headers)
    # Decide the accepted representation: explicit content_type wins,
    # then a known extension, otherwise content negotiation; an unknown
    # extension is glued back onto the path.
    accept = {}
    if content_type is None:
        if ext is not None and ext in _ctypes:
            accept = {_ctypes[ext]: True}
        else:
            accept = MDServer.MediaAccept()
            if ext is not None:
                path = "%s.%s" % (path, ext)
    else:
        accept = {content_type: True}
    # Hold the repository read-lock for the whole lookup/render phase.
    with self.lock.readlock:
        if ext == 'ds':
            # Discovery-service page: needs entityID and a 'return' URL.
            pdict = dict()
            pdict['http'] = cherrypy.request
            entityID = kwargs.get('entityID', None)
            if entityID is None:
                raise HTTPError(400, "400 Bad Request - missing entityID")
            pdict['sp'] = self.md.sha1_id(entityID)
            pdict['ret'] = kwargs.get('return', None)
            if not path:
                pdict['search'] = "/search/"
            else:
                pdict['search'] = "%s.s" % path
            if pdict['ret'] is None:
                raise HTTPError(400, "400 Bad Request - Missing 'return' parameter")
            pdict['returnIDParam'] = kwargs.get('returnIDParam', 'entityID')
            cherrypy.response.headers['Content-Type'] = 'text/html'
            pdict['style'] = '/static/css/style.css'  # TODO figure out how to sensibly set this per request
            return template("ds.html").render(**pdict)
        elif ext == 's':
            # JSON search endpoint, optionally paged.
            paged = bool(kwargs.get('paged', False))
            query = kwargs.get('query', None)
            page = kwargs.get('page', 0)
            page_limit = kwargs.get('page_limit', 10)
            entity_filter = kwargs.get('entity_filter', None)
            cherrypy.response.headers['Content-Type'] = 'application/json'
            if paged:
                res, more, total = self.md.search(query,
                                                  path=q,
                                                  page=int(page),
                                                  page_limit=int(page_limit),
                                                  entity_filter=entity_filter)
                log.debug(dumps({'entities': res, 'more': more, 'total': total}))
                return dumps({'entities': res, 'more': more, 'total': total})
            else:
                return dumps(self.md.search(query, path=q, entity_filter=entity_filter))
        elif accept.get('text/html'):
            # HTML browsing: index page, entity list, or a single entity
            # rendered through the entity2html XSLT.
            if not q:
                if pfx:
                    title = pfx
                else:
                    title = "Metadata By Attributes"
                return template("index.html").render(http=cherrypy.request,
                                                     md=self.md,
                                                     alias=alias,
                                                     aliases=self.aliases,
                                                     title=title)
            else:
                entities = self.md.lookup(q)
                if not entities:
                    raise NotFound()
                if len(entities) > 1:
                    return template("metadata.html").render(http=cherrypy.request,
                                                            md=self.md,
                                                            entities=entities)
                else:
                    entity = entities[0]
                    t = html.fragment_fromstring(unicode(xslt_transform(entity, "entity2html.xsl")))
                    # Replace every <code role="entity"> placeholder with
                    # the pretty-printed XML of the entity itself.
                    for c_elt in t.findall(".//code[@role='entity']"):
                        c_txt = dumptree(entity, pretty_print=True, xml_declaration=False).decode("utf-8")
                        p = c_elt.getparent()
                        p.remove(c_elt)
                        if p.text is not None:
                            p.text += c_txt  # re.sub(".",escape,c_txt)
                        else:
                            p.text = c_txt  # re.sub(".",escape,c_txt)
                    xml = dumptree(t, xml_declaration=False).decode('utf-8')
                    return template("basic.html").render(http=cherrypy.request, content=xml)
        else:
            # Fall back to the pipelines: first plumbing that produces a
            # result wins; its state controls caching and response headers.
            for p in self.plumbings:
                state = {'request': True,
                         'headers': {'Content-Type': 'text/xml'},
                         'accept': accept,
                         'url': cherrypy.url(relative=False),
                         'select': q,
                         'stats': {}}
                r = p.process(self.md, state=state)
                if r is not None:
                    cache_ttl = state.get('cache', 0)
                    log.debug("caching for %d seconds" % cache_ttl)
                    caching.expires(secs=cache_ttl)
                    for k, v in state.get('headers', {}).iteritems():
                        cherrypy.response.headers[k] = v
                    return r
            raise NotFound()
def finger(self, domain="localhost"):
    """Render the webfinger page for *domain* (defaults to "localhost")."""
    context = dict(http=cherrypy.request, domain=domain)
    return template("finger.html").render(**context)
prod_thread = threading.Thread(target=producer, args=(q, resources, cache)) cons_thread = threading.Thread(target=consumer, args=(q, len(resources), stats, next_jobs, resolved)) prod_thread.start() cons_thread.start() prod_thread.join() cons_thread.join() log.debug("after fetch: %d jobs to retry" % len(next_jobs)) if len(next_jobs) > 0: resources = next_jobs cache = False else: resources = [] if xrd is not None: with open(xrd, "w") as fd: fd.write(template("trust.xrd").render(links=resolved)) def parse_metadata(self, fn, key=None, base_url=None, fail_on_error=False, filter_invalid=True): """Parse a piece of XML and split it up into EntityDescriptor elements. Each such element is stored in the MDRepository instance. :param fn: a file-like object containing SAML metadata :param key: a certificate (file) or a SHA1 fingerprint to use for signature verification :param base_url: use this base url to resolve relative URLs for XInclude processing """ try: t = etree.parse(fn, base_url=base_url, parser=etree.XMLParser(resolve_entities=False)) t.xinclude() if filter_invalid: for e in t.findall('{%s}EntityDescriptor' % NS['md']): if not schema().validate(e):