def search(self, environ, start_response):
    """WSGI method, called by the wsgi app for requests that matches
    ``searchendpoint``.

    Parses the query string, runs the search and renders the hit list
    plus a result pager as an HTML response.
    """
    queryparams = self._search_parse_query(environ['QUERY_STRING'])
    res, pager = self._search_run_query(queryparams)
    # Pluralize the result count for the page title.
    if pager['totalresults'] == 1:
        title = "1 match"
    else:
        title = "%s matches" % pager['totalresults']
    title += " for '%s'" % queryparams.get("q")
    body = html.Body()
    for r in res:
        # Fall back to the document URI when no title is available, and
        # prefix the identifier when one is present.
        if 'dcterms_title' not in r or r['dcterms_title'] is None:
            r['dcterms_title'] = r['uri']
        if r.get('dcterms_identifier'):
            r['dcterms_title'] = r['dcterms_identifier'] + ": " + r[
                'dcterms_title']
        body.append(
            html.Div([
                html.H2([elements.Link(r['dcterms_title'], uri=r['uri'])]),
                r.get('text', '')
            ], **{'class': 'hit'}))
    pagerelem = self._search_render_pager(pager, queryparams,
                                          environ['PATH_INFO'])
    body.append(
        html.Div([
            html.P([
                "Results %(firstresult)s-%(lastresult)s "
                "of %(totalresults)s" % pager
            ]), pagerelem
        ], **{'class': 'pager'}))
    data = self._transform(title, body, environ, template="xsl/search.xsl")
    return self._return_response(data, start_response)
def search(self, environ, start_response):
    """WSGI method, called by the wsgi app for requests that matches
    ``searchendpoint``.

    Converts any ``issued`` query parameter (a year) into a date-range
    query before searching, boosts hits of type "sfs", and renders any
    facet aggregations returned by the backend alongside the hit list.
    """
    queryparams = self._search_parse_query(environ['QUERY_STRING'])
    # massage queryparams['issued'] if present, then restore it after
    # the query has run (pager/facet links need the original value)
    y = None
    if 'issued' in queryparams:
        try:
            y = int(queryparams['issued'])
        except ValueError:
            # 'issued' comes straight from the untrusted query string;
            # silently drop a non-numeric value instead of crashing
            # with an uncaught ValueError.
            del queryparams['issued']
        else:
            queryparams['issued'] = Between(datetime(y, 1, 1),
                                            datetime(y, 12, 31, 23, 59, 59))
    boost_types = [("sfs", 10)]
    res, pager = self._search_run_query(queryparams,
                                        boost_types=boost_types)
    if y is not None:
        queryparams['issued'] = str(y)
    # Pluralize the (Swedish) result count for the page title.
    if pager['totalresults'] == 1:
        title = "1 träff"
    else:
        title = "%s träffar" % pager['totalresults']
    title += " för '%s'" % queryparams.get("q")
    body = html.Body()
    # Only Elasticsearch-style results carry aggregations (facets).
    if hasattr(res, 'aggregations'):
        body.append(
            self._search_render_facets(res.aggregations, queryparams,
                                       environ))
    for r in res:
        if 'label' not in r:
            label = r['uri']
        elif isinstance(r['label'], list):
            # flattens any nested element structure, eg
            # <p><strong><em>foo</em></strong></p> -> foo
            label = str(r['label'])
        else:
            label = r['label']
        rendered_hit = html.Div([
            html.B([elements.Link(label, uri=r['uri'])], **
                   {'class': 'lead'})
        ], **{'class': 'hit'})
        if r.get('text'):
            rendered_hit.append(html.P([r.get('text', '')]))
        if 'innerhits' in r:
            for innerhit in r['innerhits']:
                rendered_hit.append(self._search_render_innerhit(innerhit))
        body.append(rendered_hit)
    pagerelem = self._search_render_pager(pager, queryparams,
                                          environ['PATH_INFO'])
    body.append(
        html.Div([
            html.P([
                "Träff %(firstresult)s-%(lastresult)s "
                "av %(totalresults)s" % pager
            ]), pagerelem
        ], **{'class': 'pager'}))
    data = self._transform(title, body, environ, template="xsl/search.xsl")
    return self._return_response(data, start_response)
def exception(self, environ, start_response):
    """Render the currently-active exception as a
    '500 Internal Server Error' HTML page with traceback and
    environment details for debugging.

    Must be called while an exception is being handled, since it
    relies on :func:`sys.exc_info` to retrieve it.
    """
    import traceback
    from pprint import pformat
    exc_type, exc_value, tb = sys.exc_info()
    tblines = traceback.format_exception(exc_type, exc_value, tb)
    # format_exception() returns newline-terminated strings; join with
    # "" (not "\n") to avoid double-spacing the rendered traceback.
    tbstr = "".join(tblines)
    # render the error -- the last traceback line is the
    # "ExceptionType: message" summary, which serves as page title
    title = tblines[-1]
    body = html.Body([
        html.Div([
            html.H1(self.exception_heading),
            html.P([self.exception_description]),
            html.H2("Traceback"),
            html.Pre([tbstr]),
            html.H2("Variables"),
            html.Pre([
                "request_uri: %s\nos.getcwd(): %s" %
                (request_uri(environ), os.getcwd())
            ]),
            html.H2("environ"),
            html.Pre([pformat(environ)]),
            html.H2("sys.path"),
            html.Pre([pformat(sys.path)]),
            html.H2("os.environ"),
            html.Pre([pformat(dict(os.environ))])
        ])
    ])
    msg = self._transform(title, body, environ)
    return self._return_response(msg, start_response,
                                 status="500 Internal Server Error",
                                 contenttype="text/html")
def _search_render_facets(self, facets, queryparams, environ):
    """Render the type/creator/issued facet groups as a list.

    Each bucket links to a search filtered on that facet value; a
    facet the user has already selected is instead shown with a link
    that clears the selection.
    """
    groups = []
    commondata = self.repos[0].commondata
    baseurl = request_uri(environ, include_query=False)

    def mkurl(params):
        # Build a search URL carrying the given query parameters.
        return "%s?%s" % (baseurl, urlencode(params))

    for facetname in ('type', 'creator', 'issued'):
        if facetname not in facets:
            continue
        if facetname in queryparams:
            # the user has selected a value for this particular facet:
            # don't list all buckets, just show the selection together
            # with a link that resets (removes) it
            params = dict(queryparams)
            del params[facetname]
            selected = queryparams[facetname]
            if facetname == "creator":
                selected = self.repos[0].lookup_label(selected)
            elif facetname == "type":
                selected = self.repolabels.get(selected, selected)
            heading = "%s: %s" % (
                self.facetlabels.get(facetname, facetname), selected)
            groups.append(
                html.LI([
                    heading,
                    html.A("\xa0",
                           **{'href': mkurl(params),
                              'class': 'glyphicon glyphicon-remove'})
                ]))
        else:
            # No selection yet: list every bucket with its doc count.
            items = []
            for bucket in facets[facetname]['buckets']:
                if facetname == 'type':
                    label = self.repolabels.get(bucket['key'],
                                                bucket['key'])
                    key = bucket['key']
                elif facetname == 'creator':
                    uri = URIRef(bucket['key'])
                    # prefer the skos:altLabel if one exists, otherwise
                    # fall back to foaf:name
                    if commondata.value(uri, SKOS.altLabel):
                        pred = SKOS.altLabel
                    else:
                        pred = FOAF.name
                    label = commondata.value(uri, pred)
                    key = bucket['key']
                elif facetname == "issued":
                    label = bucket["key_as_string"]
                    key = label
                params = dict(queryparams)
                params[facetname] = key
                items.append(
                    html.LI([
                        html.A("%s" % (label), **{'href': mkurl(params)}),
                        html.Span([str(bucket['doc_count'])],
                                  **{'class': 'badge pull-right'})
                    ]))
            heading = self.facetlabels.get(facetname, facetname)
            groups.append(html.LI([html.P([heading]), html.UL(items)]))
    return html.Div(groups, **{'class': 'facets'})
def test_elements_from_soup(self):
    """Converting a BeautifulSoup tree with elements_from_soup should
    yield the expected element structure."""
    from ferenda.elements import html
    self.maxDiff = 4096
    markup = """<body> <h1>Sample</h1> <div class="main"> <img src="xyz.png"/> <p>Some <b>text</b></p> <dl> <dt>Term 1</dt> <dd>Definition 1</dd> </dl> </div> <div id="foot"> <hr/> <a href="/">home</a> - <a href="/about">about</a> </div> </body>"""
    converted = html.elements_from_soup(BeautifulSoup(markup, "lxml").body)
    # print("Body: \n%s" % serialize(converted))
    expected = html.Body([
        html.H1(["Sample"]),
        html.Div([
            html.Img(src="xyz.png"),
            html.P(["Some ", html.B(["text"])]),
            html.DL([html.DT(["Term 1"]), html.DD(["Definition 1"])])
        ], **{"class": "main"}),
        html.Div([
            html.HR(),
            html.A(["home"], href="/"), " - ",
            html.A(["about"], href="/about")
        ], id="foot")
    ])
    # Compare serialized forms so a mismatch shows a readable diff.
    self.assertEqual(serialize(converted), serialize(expected))
def handle_search(self, request, **values):
    """Search handler for the werkzeug-style request API: runs the
    query from ``request.args`` and returns the rendered hit list and
    pager as an HTML Response."""
    res, pager = self._search_run_query(request.args)
    # Pluralize the result count for the page title.
    if pager['totalresults'] == 1:
        title = "1 match"
    else:
        title = "%s matches" % pager['totalresults']
    title += " for '%s'" % request.args.get("q")
    body = html.Body()
    for r in res:
        # Fall back to the document URI when no title is available, and
        # prefix the identifier when one is present.
        if 'dcterms_title' not in r or r['dcterms_title'] is None:
            r['dcterms_title'] = r['uri']
        if r.get('dcterms_identifier'):
            r['dcterms_title'] = r['dcterms_identifier'] + ": " + r[
                'dcterms_title']
        body.append(
            html.Div([
                html.H2([elements.Link(r['dcterms_title'], uri=r['uri'])]),
                r.get('text', '')
            ], **{'class': 'hit'}))
    pagerelem = self._search_render_pager(pager, dict(request.args),
                                          request.path)
    body.append(
        html.Div([
            html.P([
                "Results %(firstresult)s-%(lastresult)s "
                "of %(totalresults)s" % pager
            ]), pagerelem
        ], **{'class': 'pager'}))
    data = self._transform(title, body, request.environ,
                           template="xsl/search.xsl")
    return Response(data, mimetype="text/html")
def test_meta(self):
    """Serialize a body mixing our own elements and html elements, with
    RDF metadata attached to some nodes, and check the XHTML+RDFa
    output against an expected literal."""
    # test 3: use a mix of our own elements and html elements,
    # with meta + uri attached to some nodes
    # Metadata for section S1: a title and a chapter number.
    g1 = Graph().parse(format='n3', data=""" @prefix bibo: <http://purl.org/ontology/bibo/> . @prefix dcterms: <http://purl.org/dc/terms/> . <http://localhost:8000/res/base/basefile#S1> a bibo:DocumentPart; dcterms:title "First section"; bibo:chapter "1" . """)
    # Metadata for section S2: exercises a language-tagged literal, a
    # typed date literal, owl:sameAs, and an unrelated resource (which,
    # per the expected output below, is not serialized under S2).
    g2 = Graph().parse(format='n3', data=""" @prefix bibo: <http://purl.org/ontology/bibo/> . @prefix dcterms: <http://purl.org/dc/terms/> . @prefix owl: <http://www.w3.org/2002/07/owl#> . @prefix xsd: <http://www.w3.org/2001/XMLSchema#> . <http://localhost:8000/res/base/basefile#S2> a bibo:DocumentPart; dcterms:title "Second section"; bibo:chapter "2"; dcterms:creator "Fred Bloggs"@en-GB; dcterms:issued "2013-05-10"^^xsd:date; owl:sameAs <http://example.org/s2> . <http://example.org/s2> dcterms:title "Same same but different" . <http://localhost:8000/res/base/unlrelated> dcterms:title "Unrelated document" . """)
    # Document body: S1 as an html.Div with uri+meta, S2 as a native
    # el.Section with uri+meta, plus a nested el.Subsection inside S1.
    body = el.Body([
        el.Heading(['Toplevel heading'], level=1),
        html.P(['Introductory preamble']),
        html.Div([
            html.P(['Some text']),
            el.Subsection([el.Paragraph(['More text'])], ordinal='1.1',
                          title="First subsection")
        ], uri='http://localhost:8000/res/base/basefile#S1', meta=g1),
        el.Section([el.Paragraph(['Even more text'])],
                   uri='http://localhost:8000/res/base/basefile#S2',
                   meta=g2)
    ])
    # Expected serialization (whitespace-sensitive string literal --
    # the exact bytes matter to the assertion).
    want = """ <body xmlns="http://www.w3.org/1999/xhtml" about="http://localhost:8000/res/base/basefile"> <h1>Toplevel heading</h1> <p>Introductory preamble</p> <div about="http://localhost:8000/res/base/basefile#S1" content="First section" property="dcterms:title" typeof="bibo:DocumentPart"> <span href="http://localhost:8000/res/base/basefile" rel="dcterms:isPartOf"/> <span content="1" property="bibo:chapter" xml:lang=""/> <p>Some text</p> <div about="http://localhost:8000/res/base/basefile#S1.1" content="First subsection" property="dcterms:title" typeof="bibo:DocumentPart" class="subsection"> <span 
href="http://localhost:8000/res/base/basefile#S1" rel="dcterms:isPartOf"/> <span about="http://localhost:8000/res/base/basefile#S1.1" content="1.1" property="bibo:chapter"/> <p>More text</p> </div> </div> <div about="http://localhost:8000/res/base/basefile#S2" class="section" content="Second section" property="dcterms:title" typeof="bibo:DocumentPart"> <span href="http://localhost:8000/res/base/basefile" rel="dcterms:isPartOf"/> <span href="http://example.org/s2" rel="owl:sameAs"> <span content="Same same but different" property="dcterms:title" xml:lang=""/> </span> <span content="2" property="bibo:chapter" xml:lang=""/> <span content="2013-05-10" property="dcterms:issued" datatype="xsd:date"/> <span content="Fred Bloggs" property="dcterms:creator" xml:lang="en-GB"/> <p>Even more text</p> </div> </body>"""
    self._test_asxhtml(want, body)