def main():
    testFiles = []
    diffFiles = []
    assumptions = Set()
    global ploughOn  # even if error
    ploughOn = 0
    global verbose
    global lumped
    verbose = 0
    lumped = 1
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hf:t:m:v:g:",
            ["help", "from=", "to=", "meta=", "verbose=", "granularity="])
    except getopt.GetoptError:
        # print help information and exit:
        usage()
        sys.exit(2)
    output = None
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit(2)
        if o in ("-v", "--verbose"):
            try:
                verbose = int(a)
            except ValueError:
                verbose = 10
        if o in ("-g", "--granularity"):
            try:
                lumped = int(a)
            except ValueError:
                lumped = 0
        if o in ("-f", "--from"):
            testFiles.append(a)
        if o in ("-t", "--to"):
            diffFiles.append(a)
        if o in ("-m", "--meta"):
            assumptions.add(a)

    # if testFiles == []: testFiles = [ "/dev/stdin" ]
    if testFiles == []:
        usage()
        sys.exit(2)

    graph = loadFiles(testFiles)
    version = "$Id$"[1:-1]
    if diffFiles == []:
        nailFormula(graph, assumptions)
        if verbose > 1:
            print "# Smush generated by " + version
        print graph.close().n3String(base=base(), flags="a")
        sys.exit(0)

    graph2 = loadFiles(diffFiles)
    delta = differences(graph, graph2, assumptions)
    if verbose > 1:
        print "# Differences by " + version
    print delta.close().n3String(base=base())
    # sys.exit(len(delta))
    # sys.exit(0)   # didn't crash
    if delta.contains():  # Any statements in delta at all?
        sys.exit(1)
    else:
        sys.exit(0)
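# --- Hedged usage sketch (not part of the original source) ---
# A minimal illustration of the flow main() implements above, as a helper that
# could sit in the same module: load two graphs, compute their difference, and
# return the patch as N3.  It reuses only names that appear above
# (loadFiles(), differences(), base()); the argument lists are placeholders.
def example_diff(fromFiles, toFiles):
    from sets import Set                       # Python 2 Set, as used above
    assumptions = Set()                        # no --meta assumptions given
    graph = loadFiles(fromFiles)               # the "before" graph
    graph2 = loadFiles(toFiles)                # the "after" graph
    delta = differences(graph, graph2, assumptions)
    return delta.close().n3String(base=base())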
def testParser(command, kb, output, errorFile):
    """The main parser tester
    """
    temp_adder = md5it(command)
    commandNode = output.newBlankNode()
    output.add(commandNode, rdf.type, n3test.N3Parser)
    output.add(commandNode, n3test.command, command)
    for test in serial(gatherDAWGStyleTests(kb), gatherCwmStyleTests(kb)):
        name, type, description, inputDocument, outputDocument = test
        case = name + temp_adder + ".out"  # Make up temp filename
        tempFile = output.newSymbol(join(base(), ',temp/' + case))
        #
        if description == None:
            description = case + " (no description)"
        output.add(inputDocument, rdf.type, n3test.Input)
        output.add(inputDocument, n3test.expected, type)
        output.add(inputDocument, n3test.description, description)
        #result = 1
        thisCommand = ((command + ' > %s 2>>%s') % \
            (inputDocument.uriref(), tempFile.uriref()[5:], errorFile))
        result = system(thisCommand)
        print thisCommand
        if result != 0:  # Error case:
            output.add(commandNode, n3test.failsParsing, inputDocument)
            parseResult = output.newBlankNode()
            output.add(inputDocument, commandNode, parseResult)
            output.add(parseResult, n3test.isFile, rdf.nil)
            ef = open(errorFile, "r")
            output.add(parseResult, n3test.errorMessage, ef.read().decode('utf-8'))
            ef.close()
        else:
            output.add(commandNode, n3test.parses, inputDocument)
            parseResult = output.newBlankNode()
            output.add(inputDocument, commandNode, parseResult)
            output.add(parseResult, n3test.isFile, tempFile)
            if outputDocument is None:
                output.add(parseResult, n3test.doesNotMatch, rdf.nil)
            else:
                a = output.newBlankNode()
                child_stdin, child_stdout = popen4("%s %s -f %s -d %s" % \
                    ('python', '$SWAP/cant.py', tempFile.uriref(), outputDocument.uriref()))
                output.add(a, rdf.type, n3test.Diff)
                try:
                    ds = "".join([escapize(ii) for ii in child_stdout.read().decode('utf-8')])
                except UnicodeDecodeError:
                    ds = "unicode decode error in diff"
                output.add(a, n3test.diffString, ds)
                output.add(parseResult, a, outputDocument)
def main(argv):
    """Usage: n3absyn.py foo.n3 --pprint | --lisp | --rif | --mathml

    --pprint to print the JSON structure using python's pretty printer
    --lisp to print a lisp s-expression for use with ACL2
    --rif for Rule Interchange Format (RIF)
    --mathml for MathML
    """
    path = argv[1]
    addr = uripath.join(uripath.base(), path)
    kb = llyn.RDFStore()
    f = kb.load(addr)
    j = json_formula(f)

    if '--pprint' in argv:
        import pprint
        pprint.pprint(j)
    elif '--lisp' in argv:
        for s in lisp_form(j):
            sys.stdout.write(s)
    elif '--ikl' in argv:
        for s in ikl_sentence(j, []):
            sys.stdout.write(s)
    elif '--rif' in argv:
        for s in xml_form(j):
            sys.stdout.write(s)
    elif '--mathml' in argv:
        for s in mathml_top():
            sys.stdout.write(s)
        for s in mathml_form(j):
            sys.stdout.write(s)
    elif '--defns' in argv:
        # split top-level N3 constructs
        assert j[0] == 'forall'
        assert j[2][0] == 'and'
        for s in mathml_top():
            sys.stdout.write(s)
        sys.stdout.write('<html xmlns="http://www.w3.org/1999/xhtml">\n')
        sys.stdout.write('<head><title>defns</title></head>\n')
        sys.stdout.write('<body>\n')
        sys.stdout.write("<ul>\n")
        for expr in j[2][1:]:
            sys.stdout.write("<li>\n")
            for s in mathml_form(expr):
                sys.stdout.write(s)
            sys.stdout.write("</li>\n")
        sys.stdout.write("</ul>\n")
        sys.stdout.write("</body>\n")
        sys.stdout.write("</html>\n")
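# --- Hedged sketch (not part of the original source) ---
# The asserts in the --defns branch above imply json_formula() yields roughly
# ['forall', <variables>, ['and', <conjunct>, ...]].  Under that assumption,
# a small walker that counts the top-level conjuncts of a document; uripath,
# llyn and json_formula are the names used above, the path is a placeholder.
def count_conjuncts(path):
    addr = uripath.join(uripath.base(), path)
    kb = llyn.RDFStore()
    j = json_formula(kb.load(addr))
    if j[0] == 'forall' and j[2][0] == 'and':
        return len(j[2]) - 1   # drop the 'and' tag itself
    return 1                   # a single bare formula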
def photo(self, uri, lon, lat):
    x, y = self.deg_to_px(lon, lat)
    rel = refTo(base(), uri)
    while 1:
        for x2, y2 in self.marks:
            if sqrt((x - x2) * (x - x2) + (y - y2) * (y - y2)) < 7:
                x, y = x + 9, y - 9  # shift
                break
        else:
            break
    self.marks.append((x, y))
    self.wr("""<a xlink:href='%s'>
  <rect x='%i' y='%i' width='14' height='8' style='fill:#777;stroke:black'/>
  <circle cx='%i' cy='%i' r='3'/>
</a>""" % (rel, x - 7, y - 4, x, y))
def from_string(s):
    if False:
        #@@ looks like some of the 'why' magic in load is needed
        store = llyn.RDFStore()
        proof = notation3.SinkParser(store, baseURI=uripath.base())
        proof.startDoc()
        proof.feed(s.encode('utf-8'))
        proof.endDoc()
        #proof.close() #@@ store close?
        return formula.Formula(store)
    else:
        from tempfile import mktemp
        filename = mktemp()
        tmpfile = file(filename, "w")
        tmpfile.write(s)
        tmpfile.close()
        from swap.myStore import load
        return load(filename)
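# --- Hedged usage sketch (not part of the original source) ---
# from_string() above writes the text to a temporary file and hands it to
# swap.myStore.load(), so the caller gets back a formula it can serialize.
# The N3 snippet below is only a placeholder; n3String() is the serializer
# used elsewhere in this code base.
def example_roundtrip():
    f = from_string(u"@prefix : <#> .\n:a :b :c .\n")
    return f.n3String()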
def testCwmSparql(kb, output, errorFile): """The main parser tester """ temp_adder = '_dawg_test' commandNode = output.newBlankNode() thisProgram = output.newSymbol('http://www.w3.org/2000/10/swap/test/sparql/dawg_tester.py') cwmURI = output.newSymbol('http://www.w3.org/2000/10/swap/doc/cwm#') testCount = 0 for test in gatherDAWGStyleTests(kb): testCount += 1 ## if testCount < 317: ## continue testURI, name, type, description, queryDocument, inputDocument, outputDocument = test print('%s %s\t%s\t%s\t%s\t%s' % (testCount, testURI, name, description, queryDocument.uriref(), type)) case = (name + temp_adder + ".out").replace(' ', '_').replace('\n', '_').replace('\t', '_').replace('|', '_').replace('\\', '_').replace('/', '_').replace('&', '_').replace("'", '_').replace('"', '_') # Make up temp filename tempFile = output.newSymbol(join(base(), ',temp/' + case)) if type == 'Query': if inputDocument is None: inputDocument = lambda : 'empty.n3' inputDocument.uriref = inputDocument try: inputDocument.uriref() except: raise ValueError(inputDocument) thisCommand = ('python ../../cwm.py %s --sparql=%s --sparqlResults > %s' % (inputDocument.uriref(), queryDocument.uriref(), tempFile.uriref()[5:])) print thisCommand result = system(thisCommand) if result != 0: result = earl.fail else: if outputDocument.uriref()[-3:] == 'srx': # sparql results format. how do we deal with that? resultString = sparqlResults2Turtle(outputDocument.uriref()) outputDocument = output.newSymbol(tempFile.uriref() + '2') tempFile2 = outputDocument.uriref()[5:] temp2 = file(tempFile2, 'w') try: temp2.write(resultString) finally: temp2.close() temp = file(tempFile.uriref()[5:], 'r') try: tempString = temp.read() finally: temp.close() if 'sparql xmlns="http://www.w3.org/2005/sparql-results#"' in tempString: resultString = sparqlResults2Turtle(tempFile.uriref()) tempFile = output.newSymbol(tempFile.uriref() + '3') tempFile2 = tempFile.uriref()[5:] temp2 = file(tempFile2, 'w') try: temp2.write(resultString) finally: temp2.close() else: resultString = output.store.load(tempFile.uriref(), contentType="application/rdf+xml").ntString() tempFile = output.newSymbol(tempFile.uriref() + '3') tempFile2 = tempFile.uriref()[5:] temp2 = file(tempFile2, 'w') try: temp2.write(resultString) finally: temp2.close() result = system('python ../../cwm.py %s --ntriples | python ../../cant.py -d %s' % (outputDocument.uriref(), tempFile.uriref())) if result == 0: result = earl['pass'] else: result = earl['fail'] else: thisCommand = ('python ../../cwm.py --language=sparql %s > /dev/null 2> /dev/null' % (queryDocument.uriref(), )) result = system(thisCommand) if (result == 0 and type == 'Positive') or \ (result != 0 and type == 'Negative'): result = earl['pass'] else: system('echo %s >> %s' % (thisCommand, errorFile)) thisCommand = ('python ../../cwm.py --language=sparql %s > /dev/null 2>> %s' % (queryDocument.uriref(), errorFile)) system(thisCommand) result = earl['fail'] caseURI = output.newBlankNode() output.add(caseURI, rdf.type, earl.Assertion) output.add(caseURI, earl.assertedBy, thisProgram) output.add(caseURI, earl.subject, cwmURI) output.add(caseURI, earl.test, testURI) resultURI = output.newBlankNode() output.add(caseURI, earl.result, resultURI) output.add(resultURI, rdf.type, earl.TestResult) output.add(resultURI, earl.outcome, result) if outputDocument: output.add(resultURI, mf.result, outputDocument) output.add(resultURI, mf.got, tempFile) print '\t\t\tresult\t', result
def doCommand(startDate, endDate, inputURIs=["/dev/stdin"],totalsFilename=None): """Fin - financial summary <command> <options> <inputURIs> Totals transactions by classes to which they are known to belong This is or was http://www.w3.org/2000/10/swap/pim/fin.py """ #import urllib #import time import sys # global sax2rdf global kb, tax def noteError(e): if not errors.get(s, None): errors[s] = []; errors[s].append(e) # The base URI for this process - the Web equiv of cwd _baseURI = uripath.base() _outURI = _baseURI option_baseURI = _baseURI # To start with - then tracks running base fatalErrors = 0; # Load the data: kb = loadMany(inputURIs) qu_date = qu.date qu_in_USD = qu.in_USD qu_amount = qu.amount qu_payee = qu.payee qu_Classified = qu.Classified qu_Unclassified = qu.Unclassified taxCategories = kb.each(pred=rdf_type, obj=tax.Category) if verbose: progress("Tax categories" + `taxCategories`) specialCategories = taxCategories + [qu.Classified, qu.Unclassified, qu.Transaction] ####### Analyse the data: numberOfMonths = monthOfDate(endDate) - monthOfDate(startDate) monthTotals = [0] * numberOfMonths incomeByMonth = [0] * numberOfMonths income, outgoings = 0,0 outgoingsByMonth = [0] * numberOfMonths quCategories = kb.each(pred=rdf_type, obj=qu.Cat) bottomCategories = []; for c in quCategories: if isBottomClass(c): bottomCategories.append(c); totals = {} # Total by all classes of transaction count = {} # Number of transactions byMonth = {} sts = kb.statementsMatching(pred=qu.amount) # Ideally one per transaction errors = {} for st in sts: s = st.subject() uri = s.uriref() # classified = kb.each(pred=rdf_type, obj=qu_Classified) # unclassified = kb.each(pred=rdf_type, obj=qu_Unclassified) # for t in classified: assert t not in unclassified, "Can't be classified and unclassified!"+`t` # for s in classified + unclassified: # progress( "Transaction ", `s`) t_ok, c_ok = 0, 0 cats = allClasses(kb.each(subj=s, pred=rdf.type)) # progress( "Categories: "+`cats`) month = monthNumber(s) if month not in range(numberOfMonths) : continue payees = kb.each(subj=s, pred=qu_payee) if not payees: progress("@@@ Error: No payee for "+`uri`) payee = "@@@@@@ Unknown"; fatalErrors += 1; elif len(payees) >1 and str(payees[0]) == "Check": payee = payees[1] else: payee = payees[0] amounts = kb.each(subj=s, pred=qu_in_USD) if len(amounts) == 0: amounts = kb.each(subj=s, pred=qu_amount) if len(amounts) == 0: progress("@@@ Error: No USD amount for "+`uri`) fatalErrors += 1; else: progress("Warning: No USD amount for "+`uri`+", assuming USD") if len(amounts) >1: if (cat_ns.Internal not in cats or len(amounts) != 2 ): fatalErrors += 1; progress( "Error: More than one USD amount %s for transaction %s -- ignoring!\n" % (`amounts`,uri)) else: sum = float(amounts[0]) + float(amounts[1]) if sum != 0: fatalErrors += 1; progress("Sum %f not zero for USD amounts %s for internal transaction %s.\n" % (sum, amounts, uri)) continue if len(amounts) != 1: progress("@@@ Error: No amount for "+`uri`); fatalErrors += 1; ss = kb.statementsMatching(subj=s) progress(`ss`+'; KB='+`kb.n3String()`) continue amount = float(amounts[0].__str__()) # print "%s %40s %10s month %i" %(date, payee, `amount`, month) monthTotals[month] = monthTotals[month] + amount if cat_ns.Internal not in cats: if amount > 0: incomeByMonth[month] = incomeByMonth[month] + amount income = income + amount else: outgoingsByMonth[month] = outgoingsByMonth[month] + amount outgoings = outgoings + amount normalCats = [] # For this item for c in cats: totals[c] = totals.get(c, 0) 
+ amount byMonth[c] = byMonth.get(c, [0] * numberOfMonths) count[c] = count.get(c, 0) + 1 byMonth[c][month] = byMonth[c][month] + amount if c not in specialCategories: normalCats.append(c) bottomCats = normalCats[:] # Copy for b in normalCats: sups = kb.each(subj=b, pred=rdfs.subClassOf) for sup in sups: if sup in bottomCats: bottomCats.remove(sup) if len(bottomCats) == 0: noteError("No categoriy: %s for <%s>" # all cats: %s, raw cats:%s" %(`cats`, `s`)) # ,`cats`, `kb.each(subj=s, pred=rdf.type)`) elif bottomCats[0] not in bottomCategories and (bottomCats[0] not in [ qu.UnclassifiedIncome, qu.UnclassifiedOutgoing]): noteError("Be more specifc: %s for <%s>" %(`bottomCats[0]`, `s`)) # Won't get shown e.g. in year-cat.html if len(bottomCats) > 1: noteError("Inconsistent categories: %s" # all cats: %s, raw cats:%s" %(`bottomCats`)) # ,`cats`, `kb.each(subj=s, pred=rdf.type)`) print '<html xmlns="http://www.w3.org/1999/xhtml">' if '--summary' in sys.argv: title = "Monthly summary" elif '--issues' in sys.argv: title = "Issues" else: title = "Report" print """<head> <meta charset='UTF-8'> <title>%s</title> <link rel="Stylesheet" href="report.css"> </head> <body> """ % (title) # <img src="sand-dollar.gif" alt="dollar" align="right"/> version = "$Id$" # SUMMARY TABLE OF CATEGORY BY MONTH if '--summary' in sys.argv: print "<h2>Personal categories and months %s - %s</h2>" % (startDate, endDate) print "<table class='wide' style='border-collapse:collapse; border: 0.01em solid #aaa; text-align: right' ><col style='text-align: left'>" print "<tr><th></th><th>Total </th>" for month in range(numberOfMonths): m = month + int(startDate[5:7]) - 1 while m > 11: m -= 12 # Modulo in python? print "<th><a href='year-chron.html#m%s'>%s</a></th>" %(("0"+`m+1`)[-2:], monthName[m]), print "</tr>" def listFor(c, depth=0): # Any, because there could be 2 copies of same list :-( subs = kb.any(subj = c, pred = owl.disjointUnionOf); res = [ (c, depth) ]; if subs == None: subs = kb.each(pred = rdfs.subClassOf, obj = c); if len(subs) > 0: sys.stderr.write( "Warning: for %s: no disjointUnionOf but subclasses %s\n" %(`c`, `subs`)) for sub in subs: res += listFor(sub, depth+1) else: for sub in subs: res += listFor(sub, depth+1) return res printOrder = listFor(qu.Transaction); for cat, depth in printOrder: label = kb.the(subj=cat, pred=rdfs.label) if label == None: label = `cat` sys.stderr.write("@@ No label for "+`cat` +"\n") else: label = str(label) anchor = cat.fragid if totals.get(cat, None) != None: print monthGridRow(anchor, anchor, totals[cat], byMonth.get(cat, [0] * numberOfMonths), numberOfMonths, indent = depth) print "<tr><td colspan='14'> ___ </td></tr>" print monthGridRow("Income", None, income, incomeByMonth, numberOfMonths) print monthGridRow("Outgoings", None, outgoings, outgoingsByMonth, numberOfMonths) print monthGridRow("Balance", None, income + outgoings, monthTotals, numberOfMonths) print "</table>" # Chart of income stacked up against expenses if '--charts' in sys.argv: print "<p><a href='chart.svg'><p>Chart of day-day income vs expense</p><img src='chart.svg'></a></p>" print "<p><a href='all.svg'><p>Chart of all income vs expense</p><img src='all.svg'></a></p>" writeChart(filename = "chart.svg", categories = bottomCategories + [ qu.UnclassifiedIncome, qu.UnclassifiedOutgoing], totals = totals, income=income, outgoings=outgoings, shortTerm = 1) writeChart(filename = "all.svg", categories = bottomCategories + [ qu.UnclassifiedIncome, qu.UnclassifiedOutgoing], totals = totals, income=income, 
outgoings=outgoings, shortTerm = 0) # Output totals if (totalsFilename): ko = kb.newFormula() for c in quCategories + [ qu.UnclassifiedIncome, qu.UnclassifiedOutgoing]: ko.add(subj=c, pred=qu.total, obj=("%7.2f" % totals.get(c,0))) ko.add(subj=qu.Transaction, pred=qu.total, obj=("%7.2f" % (income + outgoings))) ko.close() fo = open(totalsFilename, "w") fo.write(ko.n3String()) fo.close if '--issues' in sys.argv: # Generate a list of errors found errstr = "" for x, list in errors.items(): errstr += transactionRow(x) for e in list: errstr += "<tr><td colspan='4'>"+`e`+"</td></tr>\n" # @@@ encode error string if errstr: print "<h2>Inconsistencies</h2><table>\n" + errstr + "</table>\n" # List Unclassified Income and Spending def transactionList(cat): ts = kb.each(pred = rdf.type, obj = cat) if len(ts) == 0: return "" label = kb.any(cat, rdfs.label) st = '<h2>'+label.value()+'</h2>\n' return st + transactionTable(ts) for cat in [ qu.UnclassifiedIncome, qu.UnclassifiedOutgoing]: print transactionList(cat) print reimbursablesCheck(); internalCheck() if 0: print "<h2>Tax Categories</h2>" taxCategories = kb.each(pred=rdf_type, obj=tax.Category) printCategoryTotalsOnly(taxCategories + [ qu.Unclassified], totals, count) print "<h2>Tax stuff</h2>" print "<table>" print "<tr><th>-<th>Form line</th><th>amount</th></tr>" print "</table>" # print "<h2>Personal Category total</h2>" # printCategoryTotalsOnly(quCategories + [ qu.Unclassified], totals, count) print print "Note totals for tax and personal breakdowns must match." dates = kb.statementsMatching(pred=qu.date) print "There should be a total of %i transactions in each." % len(dates) if 0: print "<pre>(consistency check)" problems = 0 for s in dates: tra = s.subject() types = kb.each(subj=tra, pred=rdf_type) for typ in types: if typ is qu.Unclassified or typ is qu.Classified: break # ok else: print "@@@@ problem transcation with no classified or unclassified, with types", types printTransactionDetails(tra) problems = problems + 1 print problems, "problems.</pre>" print "</body></html>" return fatalErrors
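# --- Hedged sketch (not part of the original source) ---
# The core bookkeeping in doCommand() above keeps three maps keyed by
# category: a grand total, a per-month list, and a transaction count.
# Reduced to its essentials (category, month and amount are plain values
# here rather than KB nodes, and the argument shape is an assumption):
def accumulate(transactions, numberOfMonths):
    totals, byMonth, count = {}, {}, {}
    for cat, month, amount in transactions:
        totals[cat] = totals.get(cat, 0) + amount
        byMonth[cat] = byMonth.get(cat, [0] * numberOfMonths)
        byMonth[cat][month] = byMonth[cat][month] + amount
        count[cat] = count.get(cat, 0) + 1
    return totals, byMonth, count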
def doCommand(): """Command line RDF/N3 tool <command> <options> <steps> [--with <more args> ] options: --pipe Don't store, just pipe out * steps, in order left to right: --rdf Input & Output ** in RDF/XML insead of n3 from now on --n3 Input & Output in N3 from now on. (Default) --rdf=flags Input & Output ** in RDF and set given RDF flags --n3=flags Input & Output in N3 and set N3 flags --ntriples Input & Output in NTriples (equiv --n3=usbpartane -bySubject -quiet) --language=x Input & Output in "x" (rdf, n3, etc) --rdf same as: --language=rdf --languageOptions=y --n3=sp same as: --language=n3 --languageOptions=sp --ugly Store input and regurgitate, data only, fastest * --bySubject Store input and regurgitate in subject order * --no No output * (default is to store and pretty print with anonymous nodes) * --base=<uri> Set the base URI. Input or output is done as though theis were the document URI. --closure=flags Control automatic lookup of identifiers (see below) <uri> Load document. URI may be relative to current directory. --apply=foo Read rules from foo, apply to store, adding conclusions to store --patch=foo Read patches from foo, applying insertions and deletions to store --filter=foo Read rules from foo, apply to store, REPLACING store with conclusions --query=foo Read a N3QL query from foo, apply it to the store, and replace the store with its conclusions --sparql=foo Read a SPARQL query from foo, apply it to the store, and replace the store with its conclusions --rules Apply rules in store to store, adding conclusions to store --think as -rules but continue until no more rule matches (or forever!) --engine=otter use otter (in your $PATH) instead of llyn for linking, etc --why Replace the store with an explanation of its contents --why=u proof tries to be shorter --mode=flags Set modus operandi for inference (see below) --reify Replace the statements in the store with statements describing them. --dereify Undo the effects of --reify --flatten Reify only nested subexpressions (not top level) so that no {} remain. --unflatten Undo the effects of --flatten --think=foo as -apply=foo but continue until no more rule matches (or forever!) --purge Remove from store any triple involving anything in class log:Chaff --data Remove all except plain RDF triples (formulae, forAll, etc) --strings Dump :s to stdout ordered by :k whereever { :k log:outputString :s } --crypto Enable processing of crypto builtin functions. Requires python crypto. --help print this message --revision print CVS revision numbers of major modules --chatty=50 Verbose debugging output of questionable use, range 0-99 --sparqlServer instead of outputting, start a SPARQL server on port 8000 of the store --sparqlResults After sparql query, print in sparqlResults format instead of rdf finally: --with Pass any further arguments to the N3 store as os:argv values * mutually exclusive ** doesn't work for complex cases :-/ Examples: cwm --rdf foo.rdf --n3 --pipe Convert from rdf/xml to rdf/n3 cwm foo.n3 bar.n3 --think Combine data and find all deductions cwm foo.n3 --flat --n3=spart Mode flags affect inference extedning to the web: r Needed to enable any remote stuff. a When reading schema, also load rules pointed to by schema (requires r, s) E Errors loading schemas of definitive documents are ignored m Schemas and definitive documents laoded are merged into the meta knowledge (otherwise they are consulted independently) s Read the schema for any predicate in a query. 
u Generate unique ids using a run-specific Closure flags are set to cause the working formula to be automatically exapnded to the closure under the operation of looking up: s the subject of a statement added p the predicate of a statement added o the object of a statement added t the object of an rdf:type statement added i any owl:imports documents r any doc:rules documents E errors are ignored --- This is independant of --mode=E n Normalize IRIs to URIs e Smush together any nodes which are = (owl:sameAs) See http://www.w3.org/2000/10/swap/doc/cwm for more documentation. Setting the environment variable CWM_RDFLIB to 1 maked Cwm use rdflib to parse rdf/xml files. Note that this requires rdflib. """ import time import sys from swap import myStore # These would just be attributes if this were an object global _store global workingContext option_need_rdf_sometime = 0 # If we don't need it, don't import it # (to save errors where parsers don't exist) option_pipe = 0 # Don't store, just pipe though option_inputs = [] option_reify = 0 # Flag: reify on output (process?) option_flat = 0 # Flag: reify on output (process?) option_crypto = 0 # Flag: make cryptographic algorithms available setTracking(0) option_outURI = None option_outputStyle = "-best" _gotInput = 0 # Do we not need to take input from stdin? option_meta = 0 option_normalize_iri = 0 option_flags = {"rdf": "l", "n3": "", "think": "", "sparql": ""} # RDF/XML serializer can't do list ("collection") syntax. option_quiet = 0 option_with = None # Command line arguments made available to N3 processing option_engine = "llyn" option_why = "" _step = 0 # Step number used for metadata _genid = 0 hostname = "localhost" # @@@@@@@@@@@ Get real one # The base URI for this process - the Web equiv of cwd _baseURI = uripath.base() option_format = "n3" # set the default format option_first_format = None _outURI = _baseURI option_baseURI = _baseURI # To start with - then tracks running base # First pass on command line - - - - - - - P A S S 1 for argnum in range(1, len(sys.argv)): # options after script name arg = sys.argv[argnum] if arg.startswith("--"): arg = arg[1:] # Chop posix-style -- to - # _equals = string.find(arg, "=") _lhs = "" _rhs = "" try: [_lhs, _rhs] = arg.split('=', 1) try: _uri = join(option_baseURI, _rhs) except ValueError: _uri = _rhs except ValueError: pass if arg == "-ugly": option_outputStyle = arg elif _lhs == "-base": option_baseURI = _uri elif arg == "-rdf": option_format = "rdf" if option_first_format == None: option_first_format = option_format option_need_rdf_sometime = 1 elif _lhs == "-rdf": option_format = "rdf" if option_first_format == None: option_first_format = option_format option_flags["rdf"] = _rhs option_need_rdf_sometime = 1 elif arg == "-n3": option_format = "n3" if option_first_format == None: option_first_format = option_format elif _lhs == "-n3": option_format = "n3" if option_first_format == None: option_first_format = option_format option_flags["n3"] = _rhs elif _lhs == "-mode": option_flags["think"] = _rhs elif _lhs == "-closure": if "n" in _rhs: option_normalize_iri = 1 #elif _lhs == "-solve": # sys.argv[argnum+1:argnum+1] = ['-think', '-filter=' + _rhs] elif _lhs == "-language": option_format = _rhs if option_first_format == None: option_first_format = option_format elif _lhs == "-languageOptions": option_flags[option_format] = _rhs elif arg == "-quiet": option_quiet = 1 elif arg == "-pipe": option_pipe = 1 elif arg == "-crypto": option_crypto = 1 elif _lhs == "-why": diag.tracking = 1 diag.setTracking(1) 
option_why = _rhs elif arg == "-why": diag.tracking = 1 diag.setTracking(1) option_why = "" elif arg == "-track": diag.tracking = 1 diag.setTracking(1) elif arg == "-bySubject": option_outputStyle = arg elif arg == "-no": option_outputStyle = "-no" elif arg == "-debugString": option_outputStyle = "-debugString" elif arg == "-strings": option_outputStyle = "-no" elif arg == "-sparqlResults": option_outputStyle = "-no" elif arg == "-triples" or arg == "-ntriples": option_format = "n3" option_flags["n3"] = "bravestpun" option_outputStyle = "-bySubject" option_quiet = 1 elif _lhs == "-outURI": option_outURI = _uri elif _lhs == "-chatty": setVerbosity(int(_rhs)) elif arg[:7] == "-apply=": pass elif arg[:7] == "-patch=": pass elif arg == "-reify": option_reify = 1 elif arg == "-flat": option_flat = 1 elif arg == "-help": print doCommand.__doc__ print notation3.ToN3.flagDocumentation print toXML.ToRDF.flagDocumentation try: from swap import sax2rdf # RDF1.0 syntax parser to N3 RDF stream print sax2rdf.RDFXMLParser.flagDocumentation except: pass return elif arg == "-revision": progress("cwm=", cvsRevision, "llyn=", llyn.cvsRevision) return elif arg == "-with": option_with = sys.argv[argnum + 1:] # The rest of the args are passed to n3 break elif arg[0] == "-": pass # Other option else: option_inputs.append(join(option_baseURI, arg)) _gotInput = _gotInput + 1 # input filename # Between passes, prepare for processing setVerbosity(0) if not option_normalize_iri: llyn.canonical = lambda x: x # Base defauts if option_baseURI == _baseURI: # Base not specified explicitly - special case if _outURI == _baseURI: # Output name not specified either if _gotInput == 1: # But input file *is*, _outURI = option_inputs[0] # Just output to same URI option_baseURI = _outURI # using that as base. if diag.tracking: _outURI = RDFSink.runNamespace()[:-1] option_baseURI = _outURI option_baseURI = splitFrag(option_baseURI)[0] # Fix the output sink if option_format == "rdf": _outSink = toXML.ToRDF(sys.stdout, _outURI, base=option_baseURI, flags=option_flags["rdf"]) elif option_format == "n3" or option_format == "sparql": _outSink = notation3.ToN3(sys.stdout.write, base=option_baseURI, quiet=option_quiet, flags=option_flags["n3"]) elif option_format == "trace": _outSink = RDFSink.TracingRDFSink(_outURI, base=option_baseURI, flags=option_flags.get("trace", "")) if option_pipe: # this is really what a parser wants to dump to _outSink.backing = llyn.RDFStore(_outURI + "#_g", argv=option_with, crypto=option_crypto) else: # this is really what a store wants to dump to _outSink.backing = notation3.ToN3(sys.stdout.write, base=option_baseURI, quiet=option_quiet, flags=option_flags["n3"]) # hm. why does TimBL use sys.stdout.write, above? performance at the else: raise NotImplementedError version = "$Id: cwm.py,v 1.198 2012-01-30 09:30:20 timbl Exp $" if not option_quiet and option_outputStyle != "-no": _outSink.makeComment("Processed by " + version[1:-1]) # Strip $ to disarm _outSink.makeComment(" using base " + option_baseURI) if option_flat: _outSink = notation3.Reifier(_outSink, _outURI + "#_formula", flat=1) if diag.tracking: myReason = BecauseOfCommandLine( ` sys.argv `) # @@ add user, host, pid, pwd, date time? Privacy! 
else: myReason = None if option_pipe: _store = _outSink workingContext = _outSink #.newFormula() else: if "u" in option_flags["think"]: _store = llyn.RDFStore(argv=option_with, crypto=option_crypto) else: _store = llyn.RDFStore(_outURI + "#_g", argv=option_with, crypto=option_crypto) myStore.setStore(_store) if _gotInput: workingContext = _store.newFormula(option_inputs[0] + "#_work") newTopLevelFormula(workingContext) else: # default input if option_first_format is None: option_first_format = option_format ContentType = { "rdf": "application/xml+rdf", "n3": "text/n3", "sparql": "x-application/sparql" }[option_first_format] workingContext = _store.load( # asIfFrom = join(_baseURI, ".stdin"), asIfFrom=_baseURI, contentType=ContentType, flags=option_flags[option_first_format], remember=0, referer="", why=myReason, topLevel=True) workingContext.reopen() workingContext.stayOpen = 1 # Never canonicalize this. Never share it. # ____________________________________________________________________ # Take commands from command line:- - - - - P A S S 2 option_format = "n3" # Use RDF/n3 rather than RDF/XML option_flags = {"rdf": "l", "n3": "", "think": "", "sparql": ""} option_quiet = 0 _outURI = _baseURI option_baseURI = _baseURI # To start with def filterize(): """implementation of --filter for the --filter command, so we don't have it printed twice """ global workingContext global r workingContext = workingContext.canonicalize() _store._formulaeOfLength = {} filterContext = _store.newFormula() newTopLevelFormula(filterContext) _store.load(_uri, openFormula=filterContext, why=myReason, referer="") _newContext = _store.newFormula() newTopLevelFormula(_newContext) applyRules(workingContext, filterContext, _newContext) workingContext.close() workingContext = _newContext sparql_query_formula = None for arg in sys.argv[1:]: # Command line options after script name if verbosity() > 5: progress("Processing %s." 
% (arg)) if arg.startswith("--"): arg = arg[1:] # Chop posix-style -- to - _equals = string.find(arg, "=") _lhs = "" _rhs = "" if _equals >= 0: _lhs = arg[:_equals] _rhs = arg[_equals + 1:] try: _uri = join(option_baseURI, _rhs) except ValueError: _uri = _rhs if arg[0] != "-": _inputURI = join(option_baseURI, splitFrag(arg)[0]) assert ':' in _inputURI ContentType = { "rdf": "application/xml+rdf", "n3": "text/n3", "sparql": "x-application/sparql" }[option_format] if not option_pipe: workingContext.reopen() try: load(_store, _inputURI, openFormula=workingContext, contentType=ContentType, flags=option_flags[option_format], referer="", why=myReason) except: progress(_inputURI) raise _gotInput = 1 elif arg == "-help": pass # shouldn't happen elif arg == "-revision": pass elif _lhs == "-base": option_baseURI = _uri if verbosity() > 10: progress("Base now " + option_baseURI) elif arg == "-ugly": option_outputStyle = arg elif arg == "-crypto": pass elif arg == "-pipe": pass elif _lhs == "-outURI": option_outURI = _uri elif arg == "-rdf": option_format = "rdf" elif _lhs == "-rdf": option_format = "rdf" option_flags["rdf"] = _rhs elif _lhs == "-mode": option_flags["think"] = _rhs elif _lhs == "-closure": workingContext.setClosureMode(_rhs) elif arg == "-n3": option_format = "n3" elif _lhs == "-n3": option_format = "n3" option_flags["n3"] = _rhs elif _lhs == "-language": option_format = _rhs if option_first_format == None: option_first_format = option_format elif _lhs == "-languageOptions": option_flags[option_format] = _lhs elif arg == "-quiet": option_quiet = 1 elif _lhs == "-chatty": setVerbosity(int(_rhs)) elif arg[:7] == "-track=": diag.tracking = int(_rhs) elif option_pipe: ############## End of pipable options print "# Command line error: %s illegal option with -pipe", arg break elif arg == "-triples" or arg == "-ntriples": option_format = "n3" option_flags["n3"] = "spartan" option_outputStyle = "-bySubject" option_quiet = 1 elif arg == "-bySubject": option_outputStyle = arg elif arg == "-debugString": option_outputStyle = arg elif arg[:7] == "-apply=": workingContext = workingContext.canonicalize() filterContext = _store.load(_uri, flags=option_flags[option_format], referer="", why=myReason, topLevel=True) workingContext.reopen() applyRules(workingContext, filterContext) elif arg[:7] == "-apply=": workingContext = workingContext.canonicalize() filterContext = _store.load(_uri, flags=option_flags[option_format], referer="", why=myReason, topLevel=True) workingContext.reopen() applyRules(workingContext, filterContext) elif arg[:7] == "-patch=": workingContext = workingContext.canonicalize() filterContext = _store.load(_uri, flags=option_flags[option_format], referer="", why=myReason, topLevel=True) workingContext.reopen() patch(workingContext, filterContext) elif _lhs == "-filter": filterize() elif _lhs == "-query": workingContext = workingContext.canonicalize() filterContext = _store.load(_uri, flags=option_flags[option_format], referer="", why=myReason, topLevel=True) _newContext = _store.newFormula() applyQueries(workingContext, filterContext, _newContext) workingContext.close() workingContext = _newContext elif _lhs == "-sparql": workingContext.stayOpen = False workingContext = workingContext.canonicalize() filterContext = _store.load(_uri, why=myReason, referer="", contentType="x-application/sparql") _newContext = _store.newFormula() _newContext.stayOpen = True sparql_query_formula = filterContext applySparqlQueries(workingContext, filterContext, _newContext) # workingContext.close() 
workingContext = _newContext elif _lhs == "-why" or arg == "-why": workingContext.stayOpen = False workingContext = workingContext.close() workingContext = explainFormula(workingContext, option_why) # Can't prove proofs diag.tracking = 0 diag.setTracking(0) elif arg == "-dump": workingContext = workingContext.canonicalize() progress("\nDump of working formula:\n" + workingContext.debugString()) elif arg == "-purge": workingContext.reopen() _store.purge(workingContext) elif arg == "-purge-rules" or arg == "-data": workingContext.reopen() _store.purgeExceptData(workingContext) elif arg == "-rules": workingContext.reopen() applyRules(workingContext, workingContext) elif arg[:7] == "-think=": filterContext = _store.load(_uri, referer="", why=myReason, topLevel=True) if verbosity() > 4: progress("Input rules to --think from " + _uri) workingContext.reopen() think(workingContext, filterContext, mode=option_flags["think"]) elif arg[:7] == "-solve=": # --solve is a combination of --think and --filter. think(workingContext, mode=option_flags["think"]) filterize() elif _lhs == "-engine": option_engine = _rhs elif arg == "-think": workingContext.isWorkingContext = True think(workingContext, mode=option_flags["think"]) elif arg == '-rete': from swap import pycwmko pythink = pycwmko.directPychinkoQuery(workingContext) #return #pythink() """ from pychinko import interpreter from swap.set_importer import Set, ImmutableSet pyf = pycwmko.N3Loader.N3Loader() conv = pycwmko.ToPyStore(pyf) conv.statements(workingContext) interp = interpreter.Interpreter(pyf.rules[:]) interp.addFacts(Set(pyf.facts), initialSet=True) interp.run() pyf.facts = interp.totalFacts workingContext = workingContext.store.newFormula() reconv = pycwmko.FromPyStore(workingContext, pyf) reconv.run() """ elif arg == '-sparqlServer': from swap.sparql import webserver from swap import cwm_sparql sandBoxed(True) workingContext.stayOpen = False workingContext = workingContext.canonicalize() def _handler(s): return cwm_sparql.sparql_queryString(workingContext, s) webserver.sparql_handler = _handler webserver.run() elif arg == "-lxkbdump": # just for debugging raise NotImplementedError elif arg == "-lxfdump": # just for debugging raise NotImplementedError elif _lhs == "-prove": # code copied from -filter without really being understood -sdh _tmpstore = llyn.RDFStore(_outURI + "#_g", metaURI=_metaURI, argv=option_with, crypto=option_crypto) tmpContext = _tmpstore.newFormula(_uri + "#_formula") _newURI = join(_baseURI, "_w_" + ` _genid `) # Intermediate _genid = _genid + 1 _newContext = _tmpstore.newFormula(_newURI + "#_formula") _tmpstore.loadURI(_uri) print targetkb elif arg == "-flatten": #raise NotImplementedError from swap import reify workingContext = reify.flatten(workingContext) elif arg == "-unflatten": from swap import reify workingContext = reify.unflatten(workingContext) #raise NotImplementedError elif arg == "-reify": from swap import reify workingContext = reify.reify(workingContext) elif arg == "-dereify": from swap import reify workingContext = reify.dereify(workingContext) elif arg == "-size": progress("Size: %i statements in store, %i in working formula." 
% (_store.size, workingContext.size())) elif arg == "-strings": # suppress output workingContext.outputStrings() option_outputStyle = "-no" elif arg == '-sparqlResults': from cwm_sparql import outputString, SPARQL_NS ns = _store.newSymbol(SPARQL_NS) if not sparql_query_formula: raise ValueError('No query') else: sys.stdout.write( outputString(sparql_query_formula, workingContext)[0].encode('utf_8')) option_outputStyle = "-no" elif arg == "-no": # suppress output option_outputStyle = arg elif arg[:8] == "-outURI=": pass elif arg == "-with": break else: progress("cwm: Unknown option: " + arg) sys.exit(-1) # Squirt it out if not piped workingContext.stayOpen = 0 # End its use as an always-open knowledge base if option_pipe: workingContext.endDoc() else: if hasattr(_outSink, "serializeKB"): raise NotImplementedError else: if verbosity() > 5: progress("Beginning output.") workingContext = workingContext.close() assert workingContext.canonical != None if option_outputStyle == "-ugly": _store.dumpChronological(workingContext, _outSink) elif option_outputStyle == "-bySubject": _store.dumpBySubject(workingContext, _outSink) elif option_outputStyle == "-no": pass elif option_outputStyle == "-debugString": print workingContext.debugString() else: # "-best" _store.dumpNested(workingContext, _outSink, flags=option_flags[option_format])
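# The tail of doCommand() above picks a serializer by chaining elif tests on
# option_outputStyle.  Below is a minimal, self-contained sketch of the same
# dispatch as a table of callables; the dump_* functions are hypothetical
# stand-ins for the real _store.dumpChronological / dumpBySubject / dumpNested
# methods, not cwm's actual API:

def dump_chronological(formula):
    return "# chronological dump of %s" % formula

def dump_by_subject(formula):
    return "# by-subject dump of %s" % formula

def dump_nested(formula):
    return "# nested (pretty) dump of %s" % formula

OUTPUT_STYLES = {
    "-ugly": dump_chronological,
    "-bySubject": dump_by_subject,
    "-no": lambda formula: "",   # suppress output, as for --no and --strings
    "-best": dump_nested,        # the default style
}

def serialize(formula, style="-best"):
    # Unknown styles fall back to the default, like the final else branch above.
    return OUTPUT_STYLES.get(style, dump_nested)(formula)

if __name__ == "__main__":
    print(serialize("workingContext", "-bySubject"))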
def main(): global already, agenda, errors parseAs = None grammarFile = None parseFile = None yaccFile = None global verbose global g verbose = 0 lumped = 1 try: opts, args = getopt.getopt(sys.argv[1:], "ha:v:p:g:y:", ["help", "as=", "verbose=", "parse=", "grammar=", "yacc="]) except getopt.GetoptError: usage() sys.exit(2) output = None for o, a in opts: if o in ("-h", "--help"): usage() sys.exit() if o in ("-v", "--verbose"): verbose =int(a) diag.chatty_flag = int(a) if o in ("-a", "--as"): parseAs = uripath.join(uripath.base(), a) if o in ("-p", "--parse"): parseFile = uripath.join(uripath.base(), a) if o in ("-g", "--grammar"): grammarFile = uripath.join(uripath.base(), a) if o in ("-y", "--yacc"): yaccFile = uripath.join(uripath.base(), a)[5:] # strip off file: # if testFiles == []: testFiles = [ "/dev/stdin" ] if not parseAs: usage() sys.exit(2) parseAs = uripath.join(uripath.base(), parseAs) if not grammarFile: grammarFile = parseAs.split("#")[0] # strip off fragid else: grammarFile = uripath.join(uripath.base(), grammarFile) # The Grammar formula progress("Loading " + grammarFile) start = clock() g = load(grammarFile) taken = clock() - start + 1 progress("Loaded %i statements in %fs, ie %f/s." % (len(g), taken, len(g)/taken)) document = g.newSymbol(parseAs) already = [] agenda = [] errors = [] doProduction(document) while agenda: x = agenda[0] agenda = agenda[1:] already.append(x) doProduction(x) if errors != []: progress("###### FAILED with %i errors." % len(errors)) for s in errors: progress ("\t%s" % s) exit(-2) else: progress( "Ok for predictive parsing") #if parser.verb: progress "Branch table:", branchTable if verbose: progress( "Literal terminals: %s" % literalTerminals.keys()) progress("Token regular expressions:") for r in tokenRegexps: progress( "\t%s matches %s" %(r, tokenRegexps[r].pattern) ) if yaccFile: yacc=open(yaccFile, "w") yaccConvert(yacc, document, tokenRegexps) yacc.close() if parseFile == None: exit(0) ip = webAccess.urlopenForRDF(parseFile, None) str = ip.read().decode('utf_8') sink = g.newFormula() keywords = g.each(pred=BNF.keywords, subj=document) keywords = [a.value() for a in keywords] p = PredictiveParser(sink=sink, top=document, branchTable= branchTable, tokenRegexps= tokenRegexps, keywords = keywords) p.verb = verbose start = clock() p.parse(str) taken = clock() - start + 1 progress("Loaded %i chars in %fs, ie %f/s." % (len(str), taken, len(str)/taken)) progress("Parsed <%s> OK" % parseFile) sys.exit(0) # didn't crash
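# main() above drives doProduction() with a simple worklist: "agenda" holds
# grammar productions still to be expanded and "already" those finished, so each
# production is handled once even when several rules reach it.  A self-contained
# sketch of that pattern on a toy graph (doProduction itself is replaced by a
# neighbours() callback here):

def worklist(start, neighbours):
    already, agenda = [], [start]
    while agenda:
        x = agenda.pop(0)
        if x in already:
            continue
        already.append(x)
        for y in neighbours(x):
            if y not in already and y not in agenda:
                agenda.append(y)
    return already

if __name__ == "__main__":
    toy = {"document": ["statement"], "statement": ["subject", "object"],
           "subject": [], "object": ["subject"]}
    print(worklist("document", lambda node: toy.get(node, [])))
    # ['document', 'statement', 'subject', 'object']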
a = (t - t1) / (t2 - t1) lat = lat1 + a * (lat2 - lat1) long = long1 + a * (long2 - long1) progress("%s: Before (%f, %f)" % (dt1, lat1, long1)) progress("%s: Guess (%f, %f)" % (dt, lat, long)) progress("%s: After (%f, %f)" % (dt2, lat2, long2)) where = conclusions.newBlankNode() conclusions.add(ph, GPS.approxLocation, where) conclusions.add(where, WGS.lat, lat) conclusions.add(where, WGS.long, long) # guess = isodate.fullString(...) progress("Start Output") print conclusions.close().n3String(base=base()) svgStream = open("map.svg", "w") map = Map(minla, maxla, minlo, maxlo, svgStream=svgStream) pathpoint = None for i in range(n): date, ty, da = events[i] if ty == "T": # Trackpoint (la, lo) = float(da[0]), float(da[1]) if pathpoint == None: map.startPath(lo, la, date) pathpoint = 1 else: map.straightPath(lo, la, date) elif ty == "P":
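# The fragment above linearly interpolates a position for time t between a fix at
# t1 (the "Before" point) and one at t2 (the "After" point): a is the fraction of
# the interval elapsed, so a = (t - t1) / (t2 - t1) and each coordinate is blended
# as lat1 + a * (lat2 - lat1).  A self-contained check of that arithmetic with
# made-up times (seconds) and coordinates:

def interpolate(t, t1, lat1, long1, t2, lat2, long2):
    a = float(t - t1) / (t2 - t1)   # 0.0 at t1, 1.0 at t2
    return (lat1 + a * (lat2 - lat1), long1 + a * (long2 - long1))

if __name__ == "__main__":
    # Halfway between the two fixes we expect the midpoint: (42.5, -70.5)
    print(interpolate(150, 100, 42.0, -71.0, 200, 43.0, -70.0))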
def main(): global verbose, proofs, chatty, normal, no_action start = 1 cwm_command='../cwm.py' python_command='python -tt' global ploughOn # even if error ploughOn = 0 global verbose verbose = 0 global just_fix_it just_fix_it = 0 if diag.print_all_file_names: a = file('testfilelist','w') a.write('') a.close() try: opts, testFiles = getopt.getopt(sys.argv[1:], "h?s:nNcipf:v", ["help", "start=", "testsFrom=", "no-action", "No-normal", "chatty", "ignoreErrors", "proofs", "verbose","overwrite","cwm="]) except getopt.GetoptError: # print help information and exit: usage() sys.exit(2) output = None for o, a in opts: if o in ("-h", "-?", "--help"): usage() sys.exit() if o in ("-v", "--verbose"): verbose = 1 if o in ("-i", "--ignoreErrors"): ploughOn = 1 if o in ("-s", "--start"): start = int(a) if o in ("-f", "--testsFrom"): testFiles.append(a) if o in ("-n", "--no-action"): no_action = 1 if o in ("-N", "--No-normal"): normal = 0 if o in ("-c", "--chatty"): chatty = 1 if o in ("-p", "--proofs"): proofs = 1 if o in ("--overwrite",): just_fix_it = 1 if o in ("--cwm", "--the_end"): cwm_command=a assert system("mkdir -p ,temp") == 0 assert system("mkdir -p ,diffs") == 0 if proofs: assert system("mkdir -p ,proofs") == 0 tests=0 passes=0 global problems problems = [] REFWD="http://example.com/swap/test" WD = base()[:-1] #def basicTest(case, desc, args) if verbose: progress("Test files:", testFiles) kb = loadMany(testFiles, referer="") testData = [] RDFTestData = [] RDFNegativeTestData = [] perfData = [] n3PositiveTestData = [] n3NegativeTestData = [] sparqlTestData = [] # for fn in testFiles: # print "Loading tests from", fn # kb=load(fn) for t in kb.each(pred=rdf.type, obj=test.CwmTest): verboseDebug = kb.contains(subj=t, pred=rdf.type, obj=test.VerboseTest) u = t.uriref() ref = kb.the(t, test.referenceOutput) if ref == None: case = str(kb.the(t, test.shortFileName)) refFile = "ref/%s" % case else: refFile = refTo(base(), ref.uriref()) case = "" for ch in refFile: if ch in "/#": case += "_" else: case += ch # Make up test-unique temp filename description = str(kb.the(t, test.description)) arguments = str(kb.the(t, test.arguments)) environment = kb.the(t, test.environment) if environment == None: env="" else: env = str(environment) + " " testData.append((t, t.uriref(), case, refFile, description, env, arguments, verboseDebug)) for t in kb.each(pred=rdf.type, obj=rdft.PositiveParserTest): x = t.uriref() y = x.find("/rdf-tests/") x = x[y+11:] # rest for i in range(len(x)): if x[i]in"/#": x = x[:i]+"_"+x[i+1:] case = "rdft_" + x + ".nt" # Hack - temp file name description = str(kb.the(t, rdft.description)) # if description == None: description = case + " (no description)" inputDocument = kb.the(t, rdft.inputDocument).uriref() outputDocument = kb.the(t, rdft.outputDocument).uriref() status = kb.the(t, rdft.status).string good = 1 if status != "APPROVED": if verbose: print "\tNot approved: "+ inputDocument[-40:] good = 0 categories = kb.each(t, rdf.type) for cat in categories: if cat is triage.ReificationTest: if verbose: print "\tNot supported (reification): "+ inputDocument[-40:] good = 0 ## if cat is triage.ParseTypeLiteralTest: ## if verbose: print "\tNot supported (Parse type literal): "+ inputDocument[-40:] ## good = 0 if good: RDFTestData.append((t.uriref(), case, description, inputDocument, outputDocument)) for t in kb.each(pred=rdf.type, obj=rdft.NegativeParserTest): x = t.uriref() y = x.find("/rdf-tests/") x = x[y+11:] # rest for i in range(len(x)): if x[i]in"/#": x = x[:i]+"_"+x[i+1:] case = 
"rdft_" + x + ".nt" # Hack - temp file name description = str(kb.the(t, rdft.description)) # if description == None: description = case + " (no description)" inputDocument = kb.the(t, rdft.inputDocument).uriref() status = kb.the(t, rdft.status).string good = 1 if status != "APPROVED": if verbose: print "\tNot approved: "+ inputDocument[-40:] good = 0 categories = kb.each(t, rdf.type) for cat in categories: if cat is triage.knownError: if verbose: print "\tknown failure: "+ inputDocument[-40:] good = 0 if cat is triage.ReificationTest: if verbose: print "\tNot supported (reification): "+ inputDocument[-40:] good = 0 if good: RDFNegativeTestData.append((t.uriref(), case, description, inputDocument)) for t in kb.each(pred=rdf.type, obj=n3test.PositiveParserTest): u = t.uriref() hash = u.rfind("#") slash = u.rfind("/") assert hash >0 and slash > 0 case = u[slash+1:hash] + "_" + u[hash+1:] + ".out" # Make up temp filename description = str(kb.the(t, n3test.description)) # if description == None: description = case + " (no description)" inputDocument = kb.the(t, n3test.inputDocument).uriref() good = 1 categories = kb.each(t, rdf.type) for cat in categories: if cat is triage.knownError: if verbose: print "\tknown failure: "+ inputDocument[-40:] good = 0 if good: n3PositiveTestData.append((t.uriref(), case, description, inputDocument)) for t in kb.each(pred=rdf.type, obj=n3test.NegativeParserTest): u = t.uriref() hash = u.rfind("#") slash = u.rfind("/") assert hash >0 and slash > 0 case = u[slash+1:hash] + "_" + u[hash+1:] + ".out" # Make up temp filename description = str(kb.the(t, n3test.description)) # if description == None: description = case + " (no description)" inputDocument = kb.the(t, n3test.inputDocument).uriref() n3NegativeTestData.append((t.uriref(), case, description, inputDocument)) for tt in kb.each(pred=rdf.type, obj=sparql_manifest.Manifest): for t in kb.the(subj=tt, pred=sparql_manifest.entries): name = str(kb.the(subj=t, pred=sparql_manifest.name)) query_node = kb.the(subj=t, pred=sparql_manifest.action) if isinstance(query_node, AnonymousNode): data = '' for data_node in kb.each(subj=query_node, pred=sparql_query.data): data = data + ' ' + data_node.uriref() inputDocument = kb.the(subj=query_node, pred=sparql_query.query).uriref() else: data = '' inputDocument = query_node.uriref() j = inputDocument.rfind('/') case = inputDocument[j+1:] outputDocument = kb.the(subj=t, pred=sparql_manifest.result) if outputDocument: outputDocument = outputDocument.uriref() else: outputDocument = None good = 1 status = kb.the(subj=t, pred=dawg_test.approval) if status != dawg_test.Approved: print status, name if verbose: print "\tNot approved: "+ inputDocument[-40:] good = 0 if good: sparqlTestData.append((tt.uriref(), case, name, inputDocument, data, outputDocument)) for t in kb.each(pred=rdf.type, obj=test.PerformanceTest): x = t.uriref() theTime = kb.the(subj=t, pred=test.pyStones) description = str(kb.the(t, test.description)) arguments = str(kb.the(t, test.arguments)) environment = kb.the(t, test.environment) if environment == None: env="" else: env = str(environment) + " " perfData.append((x, theTime, description, env, arguments)) testData.sort() cwmTests = len(testData) if verbose: print "Cwm tests: %i" % cwmTests RDFTestData.sort() RDFNegativeTestData.sort() rdfTests = len(RDFTestData) rdfNegativeTests = len(RDFNegativeTestData) perfData.sort() perfTests = len(perfData) n3PositiveTestData.sort() n3PositiveTests = len(n3PositiveTestData) n3NegativeTestData.sort() n3NegativeTests = 
len(n3NegativeTestData) sparqlTestData.sort() sparqlTests = len(sparqlTestData) totalTests = cwmTests + rdfTests + rdfNegativeTests + sparqlTests \ + perfTests + n3PositiveTests + n3NegativeTests if verbose: print "RDF parser tests: %i" % rdfTests for t, u, case, refFile, description, env, arguments, verboseDebug in testData: tests = tests + 1 if tests < start: continue urel = refTo(base(), u) print "%3i/%i %-30s %s" %(tests, totalTests, urel, description) # print " %scwm %s giving %s" %(arguments, case) assert case and description and arguments cleanup = """sed -e 's/\$[I]d.*\$//g' -e "s;%s;%s;g" -e '/@prefix run/d' -e 's;%s;%s;g'""" % (WD, REFWD, cwm_command, '../cwm.py') if normal: execute("""CWM_RUN_NS="run#" %s %s %s --quiet %s | %s > ,temp/%s""" % (env, python_command, cwm_command, arguments, cleanup , case)) if diff(case, refFile): problem("######### from normal case %s: %scwm %s" %( case, env, arguments)) continue if chatty and not verboseDebug: execute("""%s %s %s --chatty=100 %s &> /dev/null""" % (env, python_command, cwm_command, arguments), noStdErr=True) if proofs and kb.contains(subj=t, pred=rdf.type, obj=test.CwmProofTest): execute("""%s %s %s --quiet %s --base=a --why > ,proofs/%s""" % (env, python_command, cwm_command, arguments, case)) execute("""%s ../check.py < ,proofs/%s | %s > ,temp/%s""" % (python_command, case, cleanup , case)) if diff(case, refFile): problem("######### from proof case %s: %scwm %s" %( case, env, arguments)) # else: # progress("No proof for "+`t`+ " "+`proofs`) # progress("@@ %s" %(kb.each(t,rdf.type))) passes = passes + 1 for u, case, name, inputDocument, data, outputDocument in sparqlTestData: tests += 1 if tests < start: continue urel = refTo(base(), u) print "%3i/%i %-30s %s" %(tests, totalTests, urel, name) inNtriples = case + '_1' outNtriples = case + '_2' try: execute("""%s %s %s --sparql=%s --filter=%s --filter=%s --ntriples > ',temp/%s'""" % (python_command, cwm_command, data, inputDocument, 'sparql/filter1.n3', 'sparql/filter2.n3', inNtriples)) except NotImplementedError: pass except: problem(str(sys.exc_info()[1])) if outputDocument: execute("""%s %s %s --ntriples > ',temp/%s'""" % (python_command, cwm_command, outputDocument, outNtriples)) if rdfcompare3(inNtriples, ',temp/' + outNtriples): problem('We have a problem with %s on %s' % (inputDocument, data)) passes += 1 for u, case, description, inputDocument, outputDocument in RDFTestData: tests = tests + 1 if tests < start: continue print "%3i/%i) %s %s" %(tests, totalTests, case, description) # print " %scwm %s giving %s" %(inputDocument, case) assert case and description and inputDocument and outputDocument # cleanup = """sed -e 's/\$[I]d.*\$//g' -e "s;%s;%s;g" -e '/@prefix run/d' -e '/^#/d' -e '/^ *$/d'""" % ( # WD, REFWD) execute("""%s %s --quiet --rdf=RT %s --ntriples > ,temp/%s""" % (python_command, cwm_command, inputDocument, case)) if rdfcompare3(case, localize(outputDocument)): problem(" from positive parser test %s running\n\tcwm %s\n" %( case, inputDocument)) passes = passes + 1 for u, case, description, inputDocument in RDFNegativeTestData: tests = tests + 1 if tests < start: continue print "%3i/%i) %s %s" %(tests, totalTests, case, description) # print " %scwm %s giving %s" %(inputDocument, case) assert case and description and inputDocument # cleanup = """sed -e 's/\$[I]d.*\$//g' -e "s;%s;%s;g" -e '/@prefix run/d' -e '/^#/d' -e '/^ *$/d'""" % ( # WD, REFWD) try: execute("""%s %s --quiet --rdf=RT %s --ntriples > ,temp/%s 2>/dev/null""" % (python_command, cwm_command, 
inputDocument, case)) except: pass else: problem("""I didn't get a parse error running python %s --quiet --rdf=RT %s --ntriples > ,temp/%s from test ^=%s I should have. """ % (cwm_command, inputDocument, case, u)) passes = passes + 1 for u, case, description, inputDocument in n3PositiveTestData: tests = tests + 1 if tests < start: continue print "%3i/%i) %s %s" %(tests, totalTests, case, description) # print " %scwm %s giving %s" %(inputDocument, case) assert case and description and inputDocument # cleanup = """sed -e 's/\$[I]d.*\$//g' -e "s;%s;%s;g" -e '/@prefix run/d' -e '/^#/d' -e '/^ *$/d'""" % ( # WD, REFWD) try: execute("""%s %s --grammar=../grammar/n3-selectors.n3 --as=http://www.w3.org/2000/10/swap/grammar/n3#document --parse=%s > ,temp/%s 2>/dev/null""" % (python_command, '../grammar/predictiveParser.py', inputDocument, case)) except RuntimeError: problem("""Error running ``python %s --grammar=../grammar/n3-selectors.n3 --as=http://www.w3.org/2000/10/swap/grammar/n3#document --parse=%s > ,temp/%s 2>/dev/null''""" % ('../grammar/predictiveParser.py', inputDocument, case)) passes = passes + 1 for u, case, description, inputDocument in n3NegativeTestData: tests = tests + 1 if tests < start: continue print "%3i/%i) %s %s" %(tests, totalTests, case, description) # print " %scwm %s giving %s" %(inputDocument, case) assert case and description and inputDocument # cleanup = """sed -e 's/\$[I]d.*\$//g' -e "s;%s;%s;g" -e '/@prefix run/d' -e '/^#/d' -e '/^ *$/d'""" % ( # WD, REFWD) try: execute("""%s %s ../grammar/n3-selectors.n3 http://www.w3.org/2000/10/swap/grammar/n3#document %s > ,temp/%s 2>/dev/null""" % (python_command, '../grammar/predictiveParser.py', inputDocument, case)) except: pass else: problem("""There was no error executing ``python %s --grammar=../grammar/n3-selectors.n3 --as=http://www.w3.org/2000/10/swap/grammar/n3#document --parse=%s > ,temp/%s'' There should have been one.""" % ('../grammar/predictiveParser.py', inputDocument, case)) passes = passes + 1 timeMatcher = re.compile(r'\t([0-9]+)m([0-9]+)\.([0-9]+)s') ## from test.pystone import pystones ## pyStoneTime = pystones()[1] for u, theTime, description, env, arguments in perfData: tests = tests + 1 if tests < start: continue urel = refTo(base(), u) print "%3i/%i %-30s %s" %(tests, totalTests, urel, description) tt = os.times()[-1] a = system("""%s %s %s --quiet %s >,time.out""" % (env, python_command, cwm_command, arguments)) userTime = os.times()[-1] - tt print """%spython %s --quiet %s 2>,time.out""" % \ (env, cwm_command, arguments) ## c = file(',time.out', 'r') ## timeOutput = c.read() ## c.close() ## timeList = [timeMatcher.search(b).groups() for b in timeOutput.split('\n') if timeMatcher.search(b) is not None] ## print timeList ## userTimeStr = timeList[1] ## userTime = int(userTimeStr[0])*60 + float(userTimeStr[1] + '.' + userTimeStr[2]) pyCount = pyStoneTime * userTime print pyCount if problems != []: sys.stderr.write("\nProblems:\n") for s in problems: sys.stderr.write(" " + s + "\n") raise RuntimeError("Total %i errors in %i tests." % (len(problems), tests))
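# The performance loop above multiplies user CPU time by pyStoneTime, which the
# commented-out lines suggest is a machine-speed calibration taken from Python's
# bundled pystone benchmark (otherwise pyStoneTime is never assigned here).  A
# minimal sketch of obtaining that figure, assuming the Python 2 stdlib
# test.pystone module is available:

def pystone_rate():
    from test import pystone            # stdlib micro-benchmark (Python 2)
    benchtime, stones = pystone.pystones()
    return stones                       # pystones per second on this machine

if __name__ == "__main__":
    rate = pystone_rate()
    userTime = 2.5                      # example: seconds of CPU for one test
    print("normalised cost: %.0f pystones" % (rate * userTime))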
def baseRel(uri): return uripath.refTo(uripath.base(), uri)
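# baseRel() just relativises a URI against the process base (the Web analogue of
# the current working directory).  A quick illustration using uripath.refTo with a
# made-up base instead of uripath.base(), so the result is predictable:

from swap import uripath

def demo_baseRel():
    base = "http://example.com/swap/test/retest.py"
    # Expect something like "ref/foo.n3" and "../../other/bar.n3" respectively.
    print(uripath.refTo(base, "http://example.com/swap/test/ref/foo.n3"))
    print(uripath.refTo(base, "http://example.com/other/bar.n3"))

if __name__ == "__main__":
    demo_baseRel()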
def doCommand(serialDevice=None, outputURI=None, doTracks=1, doWaypoints=1, verbose=0): if os.name == 'nt': if not serialDevice: serialDevice = "com1" phys = Win32SerialLink(serialDevice) else: if not serialDevice: serialDevice = "/dev/ttyS0" # serialDevice = "/dev/cu.USA19H191P1.1" phys = UnixSerialLink(serialDevice) gps = Garmin(phys) print "GPS Product ID: %d Descriptions: %s Software version: %2.2f" % \ (gps.prod_id, gps.prod_descs, gps.soft_ver) f = formula() # Empty store of RDF data base = uripath.base() record = f.newBlankNode() f.add(record, RDF.type, GPS.Record) if doWaypoints: # show waypoints if verbose: print "Getting waypoints" wpts = gps.getWaypoints() for w in wpts: if verbose: progress( ` w `) wpt = symbol(uripath.join(base, w.ident)) f.add(record, GPS.waypoint, wpt) f.add(wpt, WGS.lat, obj=intern(degrees(w.slat))) f.add(wpt, WGS.long, obj=intern(degrees(w.slon))) if doTracks: # show track if verbose: print "Getting tracks" tracks = gps.getTracks() for t in tracks: track = f.newBlankNode() f.add(record, GPS.track, track) for p in t: if isinstance(p, TrackHdr): if verbose: progress( ` p `) f.add(track, GPS.disp, intern(p.dspl)) f.add(track, GPS.color, intern(p.color)) f.add(track, GPS.trk_ident, intern(p.trk_ident)) else: if verbose: progress( ` p `) point = f.newBlankNode() f.add(track, GPS.trackpoint, point) f.add(point, WGS.lat, obj=intern(degrees(p.slat))) f.add(point, WGS.long, obj=intern(degrees(p.slon))) # if verbose: progress(" time=", p.time) # progress('p.time='+`p.time`) # @@ if p.time == 0 or p.time == 0xffffffffL: if verbose: progress("time=%8x, ignoring" % p.time) else: f.add(point, WGS.time, obj=intern(isodate.fullString(TimeEpoch + p.time))) phys.f.close() # Should really be done by the del() below, but isn't del (phys) # close serial link (?) f = f.close() if verbose: progress("Beginning output. You can disconnect the GPS now.") s = f.n3String( base=base, flags="d") # Flag - no default prefix, preserve gps: prefix hint if outputURI != None: op = open(outputURI, "w") op.write(s) op.close() else: print s
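# doCommand() above does all of the GPS work; a thin command-line wrapper around
# it might look like the sketch below.  The option names (-d/--device,
# -o/--output, --no-waypoints, --no-tracks) are illustrative only, not the
# script's actual interface:

import getopt
import sys

def gps_main(argv):
    serialDevice, outputURI, doTracks, doWaypoints, verbose = None, None, 1, 1, 0
    opts, args = getopt.getopt(argv, "d:o:v",
                               ["device=", "output=", "no-waypoints", "no-tracks", "verbose"])
    for o, a in opts:
        if o in ("-d", "--device"):
            serialDevice = a
        elif o in ("-o", "--output"):
            outputURI = a
        elif o == "--no-waypoints":
            doWaypoints = 0
        elif o == "--no-tracks":
            doTracks = 0
        elif o in ("-v", "--verbose"):
            verbose = 1
    doCommand(serialDevice=serialDevice, outputURI=outputURI,
              doTracks=doTracks, doWaypoints=doWaypoints, verbose=verbose)

# gps_main(sys.argv[1:])   # needs the GPS unit attached, so not invoked here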
def figureBalances(startDate, endDate, inputURIs=["/dev/stdin"]): global verbose # The base URI for this process - the Web equiv of cwd _baseURI = uripath.base() _outURI = _baseURI option_baseURI = _baseURI # To start with - then tracks running base # Load the data: kb = loadMany(inputURIs); rates = loadMany(["currencies.n3"]); sts = kb.statementsMatching(pred = OFX.BANKTRANLIST); #if verbose: # print len(sts), " bank transaction lists." lists = [] for st in sts: tl = st.object(); start = str(kb.any(tl, OFX.DTSTART))[:10]; end = str(kb.any(tl, OFX.DTEND))[:10]; # print "Transaction list %s - %s " %(start, end) lists.append((start, end, tl, st.subject())) lists.sort(reverse = 1); # lists.reverse(); balances = []; first = {}; g = {}; for s, e, t, stmtrs in lists: # Do one statement, working backward to get the balance each date ac = kb.any(stmtrs, OFX.BANKACCTFROM); if ac == None: ac = kb.the(stmtrs, OFX.CCACCTFROM); #info("ac = "+`ac`) acid = str(kb.the(ac, OFX.ACCTID))[-4:]; # info("Bank statment %s to %s for %s" % (s, e, acid)); # @@ ledgerBalance = kb.the(stmtrs, OFX.LEDGERBAL); curdef = kb.the(stmtrs, OFX.CURDEF).value(); currency = cur.sym(curdef); conversionRate = 1 if (curdef != "USD"): conversionRate = rates.the(currency, cur.in_USD).value(); # , None, kb.store.symbol(currencySource)).value(); balanceDate = str(kb.the(ledgerBalance, OFX.DTASOF))[:10]; balance = float(str(kb.the(ledgerBalance, OFX.BALAMT))) * conversionRate; transactionsThisStatement = []; for tran in kb.each(t, OFX.STMTTRN): transactionsThisStatement.append(( str(kb.the(tran, OFX.DTPOSTED))[:10], conversionRate * float(str(str(kb.the(tran, OFX.TRNAMT)))))); transactionsThisStatement.sort(); transactionsThisStatement.reverse(); bal, dat = float(str(balance)), balanceDate; for d, a in transactionsThisStatement: # assert dat >= d, "Ooops '%s' < '%s' %d, %d in %s" % (dat, d, len(dat), len(d), acid) # print "\t\t%10s %10s\t%s\t%10.2f\t%10.2f" % (d, dat, acid, a, bal) balances.append((d, dat, acid, bal)); bal = bal - a first[acid] = [d, bal]; dat = d balances.sort(); if verbose: info("First: " + `first`) return first, balances
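# figureBalances() returns two structures: "first", a dict keyed on the last four
# digits of each account id whose value is [earliest transaction date seen,
# balance at that date], and "balances", a date-sorted list of (transaction date,
# statement date, account, running balance) tuples.  A sketch of consuming them;
# the input file name and date range are made up, and figureBalances is assumed
# to have been set up with its globals (verbose, OFX, cur) as above:

def printBalances(startDate="2007-01-01", endDate="2007-12-31"):
    first, balances = figureBalances(startDate, endDate, ["statements.n3"])
    for acid in sorted(first.keys()):
        d, bal = first[acid]
        print("account ...%s starts %s at %10.2f" % (acid, d, bal))
    for d, dat, acid, bal in balances:
        if startDate <= d <= endDate:
            print("%s  ...%s  %10.2f" % (d, acid, bal))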
from swap import diag from swap import uripath ### I used to have a main() function ### But that made the stack one longer ### Which changed the progress() output if __name__ == '__main__': diag.print_all_file_names = 1 import os import sys if False and len(sys.argv) > 1 and sys.argv[1] == 'delta': from delta import main sys.argv = sys.argv[:1] + sys.argv[2:] main() else: from cwm import doCommand sys.argv[0] = '../cwm.py' doCommand() file_list = diag.file_list file_list = [a for a in file_list if a[0:4] == 'file'] base = uripath.base() file_list = [uripath.refTo(base, a) for a in file_list] try: a = file('testfilelist', 'a') a.write('\n'.join(file_list)) a.write('\n') finally: a.close()
from math import log, exp OFX = Namespace('http://www.w3.org/2000/10/swap/pim/ofx#'); qu = Namespace("http://www.w3.org/2000/10/swap/pim/qif#") rdf = Namespace(RDF_NS_URI) rdfs = Namespace("http://www.w3.org/2000/01/rdf-schema#") cur = Namespace("http://www.w3.org/2007/ont/currency#") # cat = Namespace("categories.n3#") info = lambda s: sys.stderr.write(s+'\n'); monthName= ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] currencySource = uripath.join(uripath.base(), "currencies.n3"); kb = None; def figureBalances(startDate, endDate, inputURIs=["/dev/stdin"]): global verbose # The base URI for this process - the Web equiv of cwd _baseURI = uripath.base() _outURI = _baseURI option_baseURI = _baseURI # To start with - then tracks running base # Load the data:
def doCommand(startDate, endDate, inputURIs=["/dev/stdin"], totalsFilename=None): """Fin - financial summary <command> <options> <inputURIs> Totals transactions by classes to which they are known to belong This is or was http://www.w3.org/2000/10/swap/pim/fin.py """ #import urllib #import time import sys # global sax2rdf global kb, tax def noteError(e): if not errors.get(s, None): errors[s] = [] errors[s].append(e) # The base URI for this process - the Web equiv of cwd _baseURI = uripath.base() _outURI = _baseURI option_baseURI = _baseURI # To start with - then tracks running base fatalErrors = 0 # Load the data: kb = loadMany(inputURIs) qu_date = qu.date qu_in_USD = qu.in_USD qu_amount = qu.amount qu_payee = qu.payee qu_Classified = qu.Classified qu_Unclassified = qu.Unclassified taxCategories = kb.each(pred=rdf_type, obj=tax.Category) if verbose: progress("Tax categories" + ` taxCategories `) specialCategories = taxCategories + [ qu.Classified, qu.Unclassified, qu.Transaction ] ####### Analyse the data: numberOfMonths = monthOfDate(endDate) - monthOfDate(startDate) monthTotals = [0] * numberOfMonths incomeByMonth = [0] * numberOfMonths income, outgoings = 0, 0 outgoingsByMonth = [0] * numberOfMonths quCategories = kb.each(pred=rdf_type, obj=qu.Cat) bottomCategories = [] for c in quCategories: if isBottomClass(c): bottomCategories.append(c) totals = {} # Total by all classes of transaction count = {} # Number of transactions byMonth = {} sts = kb.statementsMatching(pred=qu.amount) # Ideally one per transaction errors = {} for st in sts: s = st.subject() uri = s.uriref() # classified = kb.each(pred=rdf_type, obj=qu_Classified) # unclassified = kb.each(pred=rdf_type, obj=qu_Unclassified) # for t in classified: assert t not in unclassified, "Can't be classified and unclassified!"+`t` # for s in classified + unclassified: # progress( "Transaction ", `s`) t_ok, c_ok = 0, 0 cats = allClasses(kb.each(subj=s, pred=rdf.type)) # progress( "Categories: "+`cats`) month = monthNumber(s) if month not in range(numberOfMonths): continue payees = kb.each(subj=s, pred=qu_payee) if not payees: progress("@@@ Error: No payee for " + ` uri `) payee = "@@@@@@ Unknown" fatalErrors += 1 elif len(payees) > 1 and str(payees[0]) == "Check": payee = payees[1] else: payee = payees[0] amounts = kb.each(subj=s, pred=qu_in_USD) if len(amounts) == 0: amounts = kb.each(subj=s, pred=qu_amount) if len(amounts) == 0: progress("@@@ Error: No USD amount for " + ` uri `) fatalErrors += 1 else: progress("Warning: No USD amount for " + ` uri ` + ", assuming USD") if len(amounts) > 1: if (cat_ns.Internal not in cats or len(amounts) != 2): fatalErrors += 1 progress( "Error: More than one USD amount %s for transaction %s -- ignoring!\n" % ( ` amounts `, uri)) else: sum = float(amounts[0]) + float(amounts[1]) if sum != 0: fatalErrors += 1 progress( "Sum %f not zero for USD amounts %s for internal transaction %s.\n" % (sum, amounts, uri)) continue if len(amounts) != 1: progress("@@@ Error: No amount for " + ` uri `) fatalErrors += 1 ss = kb.statementsMatching(subj=s) progress( ` ss ` + '; KB=' + ` kb.n3String() `) continue amount = float(amounts[0].__str__()) # print "%s %40s %10s month %i" %(date, payee, `amount`, month) monthTotals[month] = monthTotals[month] + amount if cat_ns.Internal not in cats: if amount > 0: incomeByMonth[month] = incomeByMonth[month] + amount income = income + amount else: outgoingsByMonth[month] = outgoingsByMonth[month] + amount outgoings = outgoings + amount normalCats = [] # For this item for c in cats: 
totals[c] = totals.get(c, 0) + amount byMonth[c] = byMonth.get(c, [0] * numberOfMonths) count[c] = count.get(c, 0) + 1 byMonth[c][month] = byMonth[c][month] + amount if c not in specialCategories: normalCats.append(c) bottomCats = normalCats[:] # Copy for b in normalCats: sups = kb.each(subj=b, pred=rdfs.subClassOf) for sup in sups: if sup in bottomCats: bottomCats.remove(sup) if len(bottomCats) == 0: noteError("No categoriy: for <%s>" # all cats: %s, raw cats:%s" % ( ` s `)) # ,`cats`, `kb.each(subj=s, pred=rdf.type)`) elif bottomCats[0] not in bottomCategories and (bottomCats[0] not in [ qu.UnclassifiedIncome, qu.UnclassifiedOutgoing ]): noteError("Be more specific: %s for <%s>" % ( ` bottomCats[0] `, ` s `)) # Won't get shown e.g. in year-cat.html if len(bottomCats) > 1: noteError( "Inconsistent categories: %s" # all cats: %s, raw cats:%s" % ( ` bottomCats `)) # ,`cats`, `kb.each(subj=s, pred=rdf.type)`) print '<html xmlns="http://www.w3.org/1999/xhtml">' if '--summary' in sys.argv: title = "Monthly summary" elif '--issues' in sys.argv: title = "Issues" else: title = "Report" print """<head> <meta charset='UTF-8'> <title>%s</title> <link rel="Stylesheet" href="report.css"> </head> <body> """ % (title) # <img src="sand-dollar.gif" alt="dollar" align="right"/> version = "$Id$" # SUMMARY TABLE OF CATEGORY BY MONTH if '--summary' in sys.argv: print "<h2>Personal categories and months %s - %s</h2>" % (startDate, endDate) print "<table class='wide' style='border-collapse:collapse; border: 0.01em solid #aaa; text-align: right' ><col style='text-align: left'>" print "<tr><th></th><th>Total </th>" for month in range(numberOfMonths): m = month + int(startDate[5:7]) - 1 while m > 11: m -= 12 # Modulo in python? print "<th><a href='year-chron.html#m%s'>%s</a></th>" % ( ("0" + ` m + 1 `)[-2:], monthName[m]), print "</tr>" def listFor(c, depth=0 ): # Any, because there could be 2 copies of same list :-( subs = kb.any(subj=c, pred=owl.disjointUnionOf) res = [(c, depth)] if subs == None: subs = kb.each(pred=rdfs.subClassOf, obj=c) if len(subs) > 0: sys.stderr.write( "Warning: for %s: no disjointUnionOf but subclasses %s\n" % ( ` c `, ` subs `)) for sub in subs: res += listFor(sub, depth + 1) else: for sub in subs: res += listFor(sub, depth + 1) return res printOrder = listFor(qu.Transaction) for cat, depth in printOrder: label = kb.the(subj=cat, pred=rdfs.label) if label == None: label = ` cat ` sys.stderr.write("@@ No label for " + ` cat ` + "\n") else: label = str(label) anchor = cat.fragid if totals.get(cat, None) != None: print monthGridRow(anchor, anchor, totals[cat], byMonth.get(cat, [0] * numberOfMonths), numberOfMonths, indent=depth) print "<tr><td colspan='14'> ___ </td></tr>" print monthGridRow("Income", None, income, incomeByMonth, numberOfMonths) print monthGridRow("Outgoings", None, outgoings, outgoingsByMonth, numberOfMonths) print monthGridRow("Balance", None, income + outgoings, monthTotals, numberOfMonths) print "</table>" # Chart of income stacked up against expenses if '--charts' in sys.argv: print "<p><a href='chart.svg'><p>Chart of day-day income vs expense</p><img src='chart.svg'></a></p>" print "<p><a href='all.svg'><p>Chart of all income vs expense</p><img src='all.svg'></a></p>" writeChart(filename="chart.svg", categories=bottomCategories + [qu.UnclassifiedIncome, qu.UnclassifiedOutgoing], totals=totals, income=income, outgoings=outgoings, shortTerm=1) writeChart(filename="all.svg", categories=bottomCategories + [qu.UnclassifiedIncome, qu.UnclassifiedOutgoing], totals=totals, 
income=income, outgoings=outgoings, shortTerm=0) # Output totals if (totalsFilename): ko = kb.newFormula() for c in quCategories + [ qu.UnclassifiedIncome, qu.UnclassifiedOutgoing ]: ko.add(subj=c, pred=qu.total, obj=("%7.2f" % totals.get(c, 0))) ko.add(subj=qu.Transaction, pred=qu.total, obj=("%7.2f" % (income + outgoings))) ko.close() fo = open(totalsFilename, "w") fo.write(ko.n3String()) fo.close() if '--issues' in sys.argv: # Generate a list of errors found errstr = "" for x, list in errors.items(): errstr += transactionRow(x) for e in list: errstr += "<tr><td colspan='4'>" + ` e ` + "</td></tr>\n" # @@@ encode error string if errstr: print "<h2>Inconsistencies</h2><table>\n" + errstr + "</table>\n" # List Unclassified Income and Spending def transactionList(cat): ts = kb.each(pred=rdf.type, obj=cat) if len(ts) == 0: return "" label = kb.any(cat, rdfs.label) st = '<h2>' + label.value() + '</h2>\n' return st + transactionTable(ts) for cat in [qu.UnclassifiedIncome, qu.UnclassifiedOutgoing]: print transactionList(cat) print reimbursablesCheck() internalCheck() if 0: print "<h2>Tax Categories</h2>" taxCategories = kb.each(pred=rdf_type, obj=tax.Category) printCategoryTotalsOnly(taxCategories + [qu.Unclassified], totals, count) print "<h2>Tax stuff</h2>" print "<table>" print "<tr><th>-<th>Form line</th><th>amount</th></tr>" print "</table>" # print "<h2>Personal Category total</h2>" # printCategoryTotalsOnly(quCategories + [ qu.Unclassified], totals, count) print print "Note totals for tax and personal breakdowns must match." dates = kb.statementsMatching(pred=qu.date) print "There should be a total of %i transactions in each." % len( dates) if 0: print "<pre>(consistency check)" problems = 0 for s in dates: tra = s.subject() types = kb.each(subj=tra, pred=rdf_type) for typ in types: if typ is qu.Unclassified or typ is qu.Classified: break # ok else: print "@@@@ problem transaction with no classified or unclassified, with types", types printTransactionDetails(tra) problems = problems + 1 print problems, "problems.</pre>" print "</body></html>" return fatalErrors
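# doCommand() above relies on date helpers defined elsewhere in fin.py, notably
# monthOfDate(), which it uses as an absolute month counter so that
# monthOfDate(endDate) - monthOfDate(startDate) gives the number of months in the
# reporting window.  A plausible reconstruction consistent with that usage (dates
# as ISO "YYYY-MM-DD" strings); this is a sketch, not the file's actual code:

def monthOfDate(date):
    year = int(date[0:4])
    month = int(date[5:7])
    return year * 12 + (month - 1)      # absolute month count, January = 0

if __name__ == "__main__":
    print(monthOfDate("2008-03-15") - monthOfDate("2007-01-01"))   # 14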
def main(): global already, agenda, errors parseAs = None grammarFile = None parseFile = None yaccFile = None global verbose global g verbose = 0 lumped = 1 try: opts, args = getopt.getopt( sys.argv[1:], "ha:v:p:g:y:", ["help", "as=", "verbose=", "parse=", "grammar=", "yacc="]) except getopt.GetoptError: usage() sys.exit(2) output = None for o, a in opts: if o in ("-h", "--help"): usage() sys.exit() if o in ("-v", "--verbose"): verbose = int(a) diag.chatty_flag = int(a) if o in ("-a", "--as"): parseAs = uripath.join(uripath.base(), a) if o in ("-p", "--parse"): parseFile = uripath.join(uripath.base(), a) if o in ("-g", "--grammar"): grammarFile = uripath.join(uripath.base(), a) if o in ("-y", "--yacc"): yaccFile = uripath.join(uripath.base(), a)[5:] # strip off file: # if testFiles == []: testFiles = [ "/dev/stdin" ] if not parseAs: usage() sys.exit(2) parseAs = uripath.join(uripath.base(), parseAs) if not grammarFile: grammarFile = parseAs.split("#")[0] # strip off fragid else: grammarFile = uripath.join(uripath.base(), grammarFile) # The Grammar formula progress("Loading " + grammarFile) start = clock() g = load(grammarFile) taken = clock() - start + 1 progress("Loaded %i statements in %fs, ie %f/s." % (len(g), taken, len(g) / taken)) document = g.newSymbol(parseAs) already = [] agenda = [] errors = [] doProduction(document) while agenda: x = agenda[0] agenda = agenda[1:] already.append(x) doProduction(x) if errors != []: progress("###### FAILED with %i errors." % len(errors)) for s in errors: progress("\t%s" % s) exit(-2) else: progress("Ok for predictive parsing") #if parser.verb: progress "Branch table:", branchTable if verbose: progress("Literal terminals: %s" % literalTerminals.keys()) progress("Token regular expressions:") for r in tokenRegexps: progress("\t%s matches %s" % (r, tokenRegexps[r].pattern)) if yaccFile: yacc = open(yaccFile, "w") yaccConvert(yacc, document, tokenRegexps) yacc.close() if parseFile == None: exit(0) ip = webAccess.urlopenForRDF(parseFile, None) lexer = sparql_tokens.Lexer() lexer.input(ip) #str = ip.read().decode('utf_8') sink = g.newFormula() keywords = g.each(pred=BNF.keywords, subj=document) keywords = [a.value() for a in keywords] p = PredictiveParser(sink=sink, top=document, branchTable=branchTable, tokenSet=tokenSet, keywords=keywords) p.verb = 1 start = clock() #print lexer.token() print p.parse(lexer.token) taken = clock() - start + 1 # progress("Loaded %i chars in %fs, ie %f/s." % # (len(str), taken, len(str)/taken)) progress("Parsed <%s> OK" % parseFile) sys.exit(0) # didn't crash