# The helper classes used below (XMLWriter, RDFSink, Term, LabelledNode,
# AnonymousNode, List, Literal) come from the surrounding cwm/SWAP modules.
# A typical layout (assumed, not shown in this excerpt) would be:
#   from toXML import XMLWriter
#   from RDFSink import RDFSink
#   from term import Term, Literal, LabelledNode, AnonymousNode, List
def sparql_output(query, F):
    store = F.store
    RESULTS_NS = 'http://www.w3.org/2005/sparql-results#'
    ns = store.newSymbol(SPARQL_NS)

    if query.contains(obj=ns['SelectQuery']):
        node = query.the(pred=store.type, obj=ns['SelectQuery'])
        outputList = []
        prefixTracker = RDFSink()
        prefixTracker.setDefaultNamespace(RESULTS_NS)
        prefixTracker.bind('', RESULTS_NS)
        xwr = XMLWriter(outputList.append, prefixTracker)
        xwr.makePI('xml version="%s"' % '1.0')
        xwr.startElement(RESULTS_NS+'sparql', [], prefixTracker.prefixes)
        xwr.startElement(RESULTS_NS+'head', [], prefixTracker.prefixes)
        vars = []
        for triple in query.the(subj=node, pred=ns['select']):
            vars.append(triple.object())
            xwr.emptyElement(RESULTS_NS+'variable',
                             [(RESULTS_NS+' name', str(triple.object()))],
                             prefixTracker.prefixes)
        xwr.endElement()  # head
        xwr.startElement(RESULTS_NS+'results', [], prefixTracker.prefixes)
        resultFormulae = [aa for aa in F.each(pred=store.type, obj=ns['Result'])]
        try:
            resultFormulae.sort(Term.compareAnyTerm)
        except:
            print [type(x) for x in resultFormulae]
            print Term
            raise
        for resultFormula in resultFormulae:
            xwr.startElement(RESULTS_NS+'result', [], prefixTracker.prefixes)
            for var in vars:
                binding = resultFormula.the(pred=ns['bound'], obj=var)
                if binding:
                    xwr.startElement(RESULTS_NS+'binding',
                                     [(RESULTS_NS+' name', str(var))],
                                     prefixTracker.prefixes)
                    if isinstance(binding, LabelledNode):
                        xwr.startElement(RESULTS_NS+'uri', [], prefixTracker.prefixes)
                        xwr.data(binding.uriref())
                        xwr.endElement()  # uri
                    elif isinstance(binding, (AnonymousNode, List)):
                        xwr.startElement(RESULTS_NS+'bnode', [], prefixTracker.prefixes)
                        xwr.data(bnode_replace(binding.uriref()))
                        xwr.endElement()  # bnode
                    elif isinstance(binding, Literal):
                        props = []
                        if binding.datatype:
                            props.append((RESULTS_NS+' datatype',
                                          binding.datatype.uriref()))
                        if binding.lang:
                            props.append(("http://www.w3.org/XML/1998/namespace lang",
                                          binding.lang))
                        xwr.startElement(RESULTS_NS+'literal', props,
                                         prefixTracker.prefixes)
                        xwr.data(unicode(binding))
                        xwr.endElement()  # literal
                    xwr.endElement()  # binding
                else:
                    pass  # unbound variable: no <binding> element is written
            xwr.endElement()  # result
        xwr.endElement()  # results
        xwr.endElement()  # sparql
        xwr.endDocument()
        return u''.join(outputList)

    if query.contains(obj=ns['AskQuery']):
        node = query.the(pred=store.type, obj=ns['AskQuery'])
        outputList = []
        prefixTracker = RDFSink()
        prefixTracker.setDefaultNamespace(RESULTS_NS)
        prefixTracker.bind('', RESULTS_NS)
        xwr = XMLWriter(outputList.append, prefixTracker)
        xwr.makePI('xml version="%s"' % '1.0')
        xwr.startElement(RESULTS_NS+'sparql', [], prefixTracker.prefixes)
        xwr.startElement(RESULTS_NS+'head', [], prefixTracker.prefixes)
        vars = []
        # for triple in query.the(subj=node, pred=ns['select']):
        #     vars.append(triple.object())
        #     xwr.emptyElement(RESULTS_NS+'variable', [(RESULTS_NS+'name', str(triple.object()))], prefixTracker.prefixes)
        xwr.endElement()  # head
        xwr.startElement(RESULTS_NS+'boolean', [], prefixTracker.prefixes)
        if F.the(pred=store.type, obj=ns['Success']):
            xwr.data('true')
        else:
            xwr.data('false')
        xwr.endElement()  # boolean
        xwr.endElement()  # sparql
        xwr.endDocument()
        return ''.join(outputList)
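# The SELECT branch above calls bnode_replace(), which is not part of this
# excerpt.  The sketch below is a hypothetical stand-in only, assuming the
# helper turns a blank node's internal URI reference into a short label
# suitable for a <bnode> element; the real cwm helper may differ.
_bnode_labels = {}

def bnode_replace(uriref):
    # Assign each distinct bnode URI reference a stable short label (b0, b1, ...).
    if uriref not in _bnode_labels:
        _bnode_labels[uriref] = 'b%d' % len(_bnode_labels)
    return _bnode_labels[uriref]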
# A second version of sparql_output, preserved from the original source.
# It differs from the one above: a <binding> element is written for every
# selected variable, with an empty <unbound/> element when the variable has
# no value, and bnode labels are written without the bnode_replace() helper.
def sparql_output(query, F):
    store = F.store
    RESULTS_NS = 'http://www.w3.org/2005/sparql-results#'
    ns = store.newSymbol(SPARQL_NS)

    if query.contains(obj=ns['SelectQuery']):
        node = query.the(pred=store.type, obj=ns['SelectQuery'])
        outputList = []
        prefixTracker = RDFSink()
        prefixTracker.setDefaultNamespace(RESULTS_NS)
        prefixTracker.bind('', RESULTS_NS)
        xwr = XMLWriter(outputList.append, prefixTracker)
        xwr.makePI('xml version="%s"' % '1.0')
        xwr.startElement(RESULTS_NS+'sparql', [], prefixTracker.prefixes)
        xwr.startElement(RESULTS_NS+'head', [], prefixTracker.prefixes)
        vars = []
        for triple in query.the(subj=node, pred=ns['select']):
            vars.append(triple.object())
            xwr.emptyElement(RESULTS_NS+'variable',
                             [(RESULTS_NS+' name', str(triple.object()))],
                             prefixTracker.prefixes)
        xwr.endElement()  # head
        xwr.startElement(RESULTS_NS+'results', [], prefixTracker.prefixes)
        resultFormulae = [aa for aa in F.each(pred=store.type, obj=ns['Result'])]
        resultFormulae.sort(Term.compareAnyTerm)
        for resultFormula in resultFormulae:
            xwr.startElement(RESULTS_NS+'result', [], prefixTracker.prefixes)
            for var in vars:
                xwr.startElement(RESULTS_NS+'binding',
                                 [(RESULTS_NS+' name', str(var))],
                                 prefixTracker.prefixes)
                binding = resultFormula.the(pred=ns['bound'], obj=var)
                if binding:
                    if isinstance(binding, LabelledNode):
                        xwr.startElement(RESULTS_NS+'uri', [], prefixTracker.prefixes)
                        xwr.data(binding.uriref())
                        xwr.endElement()  # uri
                    elif isinstance(binding, (AnonymousNode, List)):
                        xwr.startElement(RESULTS_NS+'bnode', [], prefixTracker.prefixes)
                        xwr.data(binding.uriref())
                        xwr.endElement()  # bnode
                    elif isinstance(binding, Literal):
                        props = []
                        if binding.datatype:
                            props.append((RESULTS_NS+' datatype',
                                          binding.datatype.uriref()))
                        if binding.lang:
                            props.append(("http://www.w3.org/XML/1998/namespace lang",
                                          binding.lang))
                        xwr.startElement(RESULTS_NS+'literal', props,
                                         prefixTracker.prefixes)
                        xwr.data(unicode(binding))
                        xwr.endElement()  # literal
                else:
                    xwr.emptyElement(RESULTS_NS+'unbound', [], prefixTracker.prefixes)
                xwr.endElement()  # binding
            xwr.endElement()  # result
        xwr.endElement()  # results
        xwr.endElement()  # sparql
        xwr.endDocument()
        return u''.join(outputList)

    if query.contains(obj=ns['AskQuery']):
        node = query.the(pred=store.type, obj=ns['AskQuery'])
        outputList = []
        prefixTracker = RDFSink()
        prefixTracker.setDefaultNamespace(RESULTS_NS)
        prefixTracker.bind('', RESULTS_NS)
        xwr = XMLWriter(outputList.append, prefixTracker)
        xwr.makePI('xml version="%s"' % '1.0')
        xwr.startElement(RESULTS_NS+'sparql', [], prefixTracker.prefixes)
        xwr.startElement(RESULTS_NS+'head', [], prefixTracker.prefixes)
        vars = []
        # for triple in query.the(subj=node, pred=ns['select']):
        #     vars.append(triple.object())
        #     xwr.emptyElement(RESULTS_NS+'variable', [(RESULTS_NS+'name', str(triple.object()))], prefixTracker.prefixes)
        xwr.endElement()  # head
        xwr.startElement(RESULTS_NS+'boolean', [], prefixTracker.prefixes)
        if F.the(pred=store.type, obj=ns['Success']):
            xwr.data('true')
        else:
            xwr.data('false')
        xwr.endElement()  # boolean
        xwr.endElement()  # sparql
        xwr.endDocument()
        return ''.join(outputList)
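# For orientation only: the SELECT branch assembles a document in the W3C
# SPARQL Query Results XML Format.  The literal below is an illustrative
# example of that shape (the variable and URI are made up); the ASK branch
# produces the same <sparql> envelope with a single <boolean> element in
# place of <results>.
EXAMPLE_SELECT_OUTPUT = u'''<?xml version="1.0"?>
<sparql xmlns="http://www.w3.org/2005/sparql-results#">
  <head>
    <variable name="x"/>
  </head>
  <results>
    <result>
      <binding name="x"><uri>http://example.org/thing</uri></binding>
    </result>
  </results>
</sparql>
'''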
def generate(title, inputfilename, outputfilename=''):
    """Generate an HTML version of the grammar, given a title, an input
    filename (X.g) and an output filename (defaulting to X.html)."""

    import sys, codecs
    from toXML import XMLWriter

    # codecs.lookup() returns (encoder, decoder, streamreader, streamwriter);
    # only the StreamWriter class is needed here.
    dummy, dummy, dummy, encWriter = codecs.lookup('utf-8')

    if not outputfilename:
        if inputfilename[-2:] == '.g':
            outputfilename = inputfilename[:-2] + '.html'
        else:
            raise ValueError('Invalid filename: %s' % inputfilename)

    DIVIDER = '\n%%\n'    # This pattern separates the pre/post parsers
    preparser, postparser = None, None    # Code before and after the parser desc

    # Read the entire file
    s = open(inputfilename, 'r').read()

    # See if there's a separation between the pre-parser and parser
    f = s.find(DIVIDER)
    if f >= 0:
        preparser, s = s[:f] + '\n\n', s[f + len(DIVIDER):]

    # See if there's a separation between the parser and post-parser
    f = s.find(DIVIDER)
    if f >= 0:
        s, postparser = s[:f], '\n\n' + s[f + len(DIVIDER):]

    # Create the parser and scanner
    p = yapps2.ParserDescription(yapps2.ParserDescriptionScanner(s))
    if not p:
        return

    # Now parse the file
    t = yappsrt.wrap_error_reporter(p, 'Parser')
    if not t:
        return  # Error

    # Generate the output (written to stdout via the UTF-8 stream writer)
    xwr = XMLWriter(encWriter(sys.stdout))
    xwr.startElement('html')    #@@ xmlns
    xwr.startElement('head')
    xwr.startElement('title')
    xwr.data(title)
    xwr.endElement()    # title
    xwr.endElement()    # head
    xwr.startElement('body')
    xwr.startElement('h1')
    xwr.data(title)
    xwr.endElement()    # h1

    xwr.startElement('address')
    xwr.data('source: ')
    xwr.startElement('a', [('href', inputfilename)])
    xwr.data(inputfilename)
    xwr.endElement()    # a
    xwr.data(', a ')
    xwr.startElement('a', [('href', 'http://theory.stanford.edu/~amitp/Yapps/')])
    xwr.data('YAPPS')
    xwr.endElement()    # a
    xwr.data(' grammar')
    xwr.endElement()    # address

    toHTML(t, xwr)

    xwr.endElement()    # body
    xwr.endElement()    # html
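# A minimal usage sketch (the script name in the comment is an assumption,
# not part of the original source): generate() writes its HTML rendering to
# standard output, so a caller would typically redirect stdout to a file.
if __name__ == '__main__':
    import sys
    if len(sys.argv) == 3:
        # e.g. python grammar2html.py "SPARQL grammar" sparql.g > sparql.html
        generate(sys.argv[1], sys.argv[2])
    else:
        print 'Usage: <title> <grammar.g>'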