def buildNodeSet(self, goal, antecedent=None, proof=False):
    """Build (or retrieve from cache) the proof NodeSet concluding *goal*.

    If *goal* has no entry in the network's justifications it must have
    been directly asserted, so the node set gets a single InferenceStep
    sourced from the asserted RDF graph.  Otherwise the steps are built
    from the RETE justifications for the goal.  Node sets are cached in
    ``self.goals`` keyed by the goal itself.

    :param goal: the fact (triple) the node set concludes.
    :param antecedent: optional antecedent node set (affects trace text only).
    :param proof: True when this goal is the proof root (trace text only).
    :return: the NodeSet for *goal*.
    """
    if goal not in self.network.justifications:
        # Not inferred, must have been originally asserted
        self.trace.append(
            "Building %s around%sgoal (justified by a direct assertion): %s" % (
                'proof' if proof else 'nodeset',
                # BUG FIX: was '' in the no-antecedent case, which rendered
                # as "aroundgoal"; the else-branch below correctly uses ' '.
                ' antecedent ' if antecedent else ' ',
                str(buildUniTerm(goal, self.network.nsMap))))
        if goal in self.goals:
            ns = self.goals[goal]
            self.trace.append("Retrieving prior nodeset %s for %s" % (ns, goal))
        else:
            idx = BNode()
            ns = NodeSet(goal, network=self.network, identifier=idx)
            self.goals[goal] = ns
            # Justified directly by the assertion, not by a rule firing.
            ns.steps.append(InferenceStep(ns, source='some RDF graph'))
            self.trace.append(
                "Marking justification from assertion for " + repr(goal))
    else:
        if goal in self.goals:
            ns = self.goals[goal]
            self.trace.append("Retrieving prior nodeset %s for %s" % (ns, goal))
        else:
            self.trace.append("Building %s around%sgoal: %s" % (
                'proof' if proof else 'nodeset',
                ' antecedent ' if antecedent else ' ',
                str(buildUniTerm(goal, self.network.nsMap))))
            idx = BNode()
            ns = NodeSet(goal, network=self.network, identifier=idx)
            self.goals[goal] = ns
            # One inference step per RETE terminal node that justifies the goal.
            ns.steps = [self.buildInferenceStep(ns, tNode, goal)
                        for tNode in fetchRETEJustifications(goal, ns, self)]
            assert ns.steps
    return ns
def __repr__(self):
    """Return a short description of this proof step's conclusion."""
    prefix = 'not ' if self.naf else ''
    nsMap = self.network and self.network.nsMap or {}
    return "Proof step for %s%s" % (prefix, buildUniTerm(self.conclusion, nsMap))
def __repr__(self):
    """Describe the proof step, prefixing 'not ' for NAF conclusions."""
    term = buildUniTerm(self.conclusion,
                        self.network and self.network.nsMap or {})
    if self.naf:
        return "Proof step for not %s" % (term,)
    return "Proof step for %s" % (term,)
def buildNodeSet(self, goal, antecedent=None, proof=False):
    """Build (or retrieve from cache) the proof NodeSet concluding *goal*.

    Goals absent from the network's justifications are treated as direct
    assertions and justified by a single InferenceStep; inferred goals get
    one step per RETE justification.  Built node sets are cached in
    ``self.goals``.

    :param goal: the fact (triple) the node set concludes.
    :param antecedent: optional antecedent node set (affects trace text only).
    :param proof: True when this goal is the proof root (trace text only).
    :return: the NodeSet for *goal*.
    """
    if goal not in self.network.justifications:
        # Not inferred, must have been originally asserted
        self.trace.append(
            "Building %s around%sgoal (justified by a direct assertion): %s" % (
                'proof' if proof else 'nodeset',
                # BUG FIX: was '' in the no-antecedent case, which rendered
                # as "aroundgoal"; the else-branch below correctly uses ' '.
                ' antecedent ' if antecedent else ' ',
                str(buildUniTerm(goal, self.network.nsMap))))
        # Dead code removed: a list of workingMemory tuples was built here
        # solely for a commented-out assertion and never used.
        if goal in self.goals:
            ns = self.goals[goal]
            self.trace.append("Retrieving prior nodeset %s for %s" % (ns, goal))
        else:
            idx = BNode()
            ns = NodeSet(goal, network=self.network, identifier=idx)
            self.goals[goal] = ns
            # Justified directly by the assertion, not by a rule firing.
            ns.steps.append(InferenceStep(ns, source='some RDF graph'))
            self.trace.append(
                "Marking justification from assertion for " + repr(goal))
    else:
        if goal in self.goals:
            ns = self.goals[goal]
            self.trace.append("Retrieving prior nodeset %s for %s" % (ns, goal))
        else:
            self.trace.append("Building %s around%sgoal: %s" % (
                'proof' if proof else 'nodeset',
                ' antecedent ' if antecedent else ' ',
                str(buildUniTerm(goal, self.network.nsMap))))
            idx = BNode()
            ns = NodeSet(goal, network=self.network, identifier=idx)
            self.goals[goal] = ns
            # One inference step per RETE terminal node that justifies the goal.
            ns.steps = [self.buildInferenceStep(ns, tNode, goal)
                        for tNode in fetchRETEJustifications(goal, ns, self)]
            assert ns.steps
    return ns
def serialize(self, builder, proofGraph):
    """Serialize this node set and its inference steps into *proofGraph* as PML."""
    conclusion = Literal(repr(buildUniTerm(self.conclusion, self.network.nsMap)))
    proofGraph.add((self.identifier, PML.hasConclusion, conclusion))
    proofGraph.add((self.identifier, RDF.type, PML.NodeSet))
    for step in self.steps:
        proofGraph.add((self.identifier, PML.isConsequentOf, step.identifier))
        builder.serializedNodeSets.add(self.identifier)
        step.serialize(builder, proofGraph)
def serialize(self, builder, proofGraph):
    """Emit PML triples describing this node set into *proofGraph*."""
    prefix = 'not ' if self.naf else ''
    conclusionText = "%s%s" % (
        prefix, repr(buildUniTerm(self.conclusion, self.network.nsMap)))
    proofGraph.add((self.identifier, PML.hasConclusion, Literal(conclusionText)))
    proofGraph.add((self.identifier, RDF.type, PML.NodeSet))
    for step in self.steps:
        proofGraph.add((self.identifier, PML.isConsequentOf, step.identifier))
        builder.serializedNodeSets.add(self.identifier)
        step.serialize(builder, proofGraph)
def serialize(self, builder, proofGraph):
    """Serialize this node set (conclusion, type, and steps) as PML triples."""
    negation = self.naf and 'not ' or ''
    term = repr(buildUniTerm(self.conclusion, self.network.nsMap))
    nodeTriples = [
        (self.identifier, PML.hasConclusion, Literal(negation + term)),
        (self.identifier, RDF.type, PML.NodeSet),
    ]
    for triple in nodeTriples:
        proofGraph.add(triple)
    for step in self.steps:
        proofGraph.add((self.identifier, PML.isConsequentOf, step.identifier))
        builder.serializedNodeSets.add(self.identifier)
        step.serialize(builder, proofGraph)
def DerivedPredicateIterator(factsOrBasePreds, ruleset,
                             strict=DDL_STRICTNESS_FALLBACK_DERIVED,
                             defaultPredicates=None):
    """Yield the derived predicate symbols of a DDL (facts + ruleset).

    A predicate appearing as a rule-head operator is derived; a predicate
    appearing in the facts (EDB) is base.  Symbols in both sets are handled
    per *strict*: raise SyntaxError, or fall back to marking them derived /
    base (with a warning).

    :param factsOrBasePreds: iterable of fact triples; entries whose
        predicate is ``LOG.implies`` are skipped when collecting base preds.
    :param ruleset: iterable of rules with ``formula.head`` / ``formula.body``.
    :param strict: one of the DDL_STRICTNESS_* constants.
    :param defaultPredicates: optional ``(basePreds, derivedPreds)`` pair of
        predicates with a pre-assigned classification.
    :yields: derived predicate symbols (excluding default base predicates).
    """
    import warnings  # hoisted out of the loop body
    if not defaultPredicates:
        defaultPredicates = [], []
    defaultBasePreds, defaultDerivedPreds = defaultPredicates
    basePreds = [GetOp(buildUniTerm(fact)) for fact in factsOrBasePreds
                 if fact[1] != LOG.implies]
    # processed[False]: head operators seen; processed[True]: body operators seen.
    processed = {True: set(), False: set()}
    derivedPreds = set()
    uncertainPreds = set()
    ruleBodyPreds = set()
    ruleHeads = set()
    for rule in ruleset:
        if rule.formula.body:
            # Iterate over terms from head (idx == 0) to end of body (idx > 0).
            for idx, term in enumerate(
                    itertools.chain(iterCondition(rule.formula.head),
                                    iterCondition(rule.formula.body))):
                op = GetOp(term)
                if op not in processed[idx > 0]:  # not processed before
                    if idx > 0:  # body literal
                        ruleBodyPreds.add(op)
                    else:  # head literal
                        ruleHeads.add(op)
                    # DDL well-formedness check: op is a base predicate *and*
                    # a head literal (derived).  Condition simplified from
                    # `not (op not in basePreds or idx > 0)` via De Morgan.
                    if strict in DDL_MUST_CHECK and \
                            op in basePreds and idx == 0:
                        if strict in DDL_FALLBACK:
                            mark = ('derived'
                                    if strict == DDL_STRICTNESS_FALLBACK_DERIVED
                                    else 'base')
                            if strict == DDL_STRICTNESS_FALLBACK_DERIVED and \
                                    op not in defaultBasePreds:
                                # Clashing predicate marked as derived due to
                                # the level of strictness.
                                derivedPreds.add(op)
                            elif strict == DDL_STRICTNESS_FALLBACK_BASE and \
                                    op not in defaultDerivedPreds:
                                # Clashing predicate marked as base due to the
                                # level of strictness.
                                defaultBasePreds.append(op)
                            warnings.warn(
                                "predicate symbol of %s is in both IDB and EDB. Marking as %s"
                                % (term, mark))
                        else:
                            raise SyntaxError(
                                "%s is a member of a derived predicate and a base predicate."
                                % term)
                    if op in basePreds:
                        # Base predicates are marked for later validation.
                        uncertainPreds.add(op)
                    else:
                        if idx == 0 and not isinstance(op, Variable):
                            # Head literal with a proper predicate symbol:
                            # identify as a derived predicate.
                            derivedPreds.add(op)
                        elif not isinstance(op, Variable):
                            # Body literal with a proper predicate symbol:
                            # mark for later validation.
                            uncertainPreds.add(op)
                    processed[idx > 0].add(op)
    for pred in uncertainPreds:
        # For each predicate marked as 'uncertain', do further checking:
        # not in any body literal and a proper symbol, or a rule head,
        # means it is derived.
        if (pred not in ruleBodyPreds and not isinstance(pred, Variable)) or \
                pred in ruleHeads:
            derivedPreds.add(pred)
    for pred in derivedPreds:
        if pred not in defaultBasePreds:
            yield pred
def convert2NormalUterm(self):
    """Build a normal Uniterm from this object's RDF triple form."""
    rdfTriple = self.toRDFTuple()
    return buildUniTerm(rdfTriple)
def __repr__(self):
    """Return a short description of this proof step's conclusion.

    Guards against an unset ``self.network`` (falling back to an empty
    namespace map) so ``repr()`` never raises — consistent with the
    NAF-aware variants of this method elsewhere in the file.
    """
    nsMap = self.network and self.network.nsMap or {}
    return "Proof step for %s" % (buildUniTerm(self.conclusion, nsMap),)
def DerivedPredicateIterator(factsOrBasePreds, ruleset,
                             strict=DDL_STRICTNESS_FALLBACK_DERIVED,
                             defaultPredicates=None):
    """Yield the derived predicate symbols of a DDL (facts + ruleset).

    A predicate appearing as a rule-head operator is derived; a predicate
    appearing in the facts (EDB) is base.  Symbols in both sets are handled
    per *strict*: raise SyntaxError, or fall back to marking them derived /
    base (with a warning).

    :param factsOrBasePreds: iterable of fact triples; entries whose
        predicate is ``LOG.implies`` are skipped when collecting base preds.
    :param ruleset: iterable of rules with ``formula.head`` / ``formula.body``.
    :param strict: one of the DDL_STRICTNESS_* constants.
    :param defaultPredicates: optional ``(basePreds, derivedPreds)`` pair of
        predicates with a pre-assigned classification.
    :yields: derived predicate symbols (excluding default base predicates).
    """
    import warnings  # hoisted out of the loop body
    if not defaultPredicates:
        defaultPredicates = [], []
    defaultBasePreds, defaultDerivedPreds = defaultPredicates
    basePreds = [GetOp(buildUniTerm(fact)) for fact in factsOrBasePreds
                 if fact[1] != LOG.implies]
    # processed[False]: head operators seen; processed[True]: body operators seen.
    processed = {True: set(), False: set()}
    derivedPreds = set()
    uncertainPreds = set()
    ruleBodyPreds = set()
    ruleHeads = set()
    for rule in ruleset:
        if rule.formula.body:
            # Iterate over terms from head (idx == 0) to end of body (idx > 0).
            for idx, term in enumerate(
                    itertools.chain(iterCondition(rule.formula.head),
                                    iterCondition(rule.formula.body))):
                op = GetOp(term)
                if op not in processed[idx > 0]:  # not processed before
                    if idx > 0:  # body literal
                        ruleBodyPreds.add(op)
                    else:  # head literal
                        ruleHeads.add(op)
                    # DDL well-formedness check: op is a base predicate *and*
                    # a head literal (derived).  Condition simplified from
                    # `not (op not in basePreds or idx > 0)` via De Morgan.
                    if strict in DDL_MUST_CHECK and \
                            op in basePreds and idx == 0:
                        if strict in DDL_FALLBACK:
                            mark = ('derived'
                                    if strict == DDL_STRICTNESS_FALLBACK_DERIVED
                                    else 'base')
                            if strict == DDL_STRICTNESS_FALLBACK_DERIVED and \
                                    op not in defaultBasePreds:
                                # Clashing predicate marked as derived due to
                                # the level of strictness.
                                derivedPreds.add(op)
                            elif strict == DDL_STRICTNESS_FALLBACK_BASE and \
                                    op not in defaultDerivedPreds:
                                # Clashing predicate marked as base due to the
                                # level of strictness.
                                defaultBasePreds.append(op)
                            warnings.warn(
                                "predicate symbol of %s is in both IDB and EDB. Marking as %s"
                                % (term, mark))
                        else:
                            raise SyntaxError(
                                "%s is a member of a derived predicate and a base predicate."
                                % term)
                    if op in basePreds:
                        # Base predicates are marked for later validation.
                        uncertainPreds.add(op)
                    else:
                        if idx == 0 and not isinstance(op, Variable):
                            # Head literal with a proper predicate symbol:
                            # identify as a derived predicate.
                            derivedPreds.add(op)
                        elif not isinstance(op, Variable):
                            # Body literal with a proper predicate symbol:
                            # mark for later validation.
                            uncertainPreds.add(op)
                    processed[idx > 0].add(op)
    for pred in uncertainPreds:
        # For each predicate marked as 'uncertain', do further checking:
        # not in any body literal and a proper symbol, or a rule head,
        # means it is derived.
        if (pred not in ruleBodyPreds and not isinstance(pred, Variable)) or \
                pred in ruleHeads:
            derivedPreds.add(pred)
    for pred in derivedPreds:
        if pred not in defaultBasePreds:
            yield pred