def __call__(self, node):
    # ======================================================================
    # Initialization
    # ======================================================================
    if not isinstance(node, NLInstruction):
        raise ValueError('Argument must be NLInstruction, got %s' % type(node).__name__)
    if self.prac.verbose > 0:
        print(prac_heading('Parsing %s' % node))
    infstep = PRACInferenceStep(node, self)

    # ======================================================================
    # Preprocessing
    # ======================================================================
    instr = self.compounds(node.instr)

    # ======================================================================
    # Parsing Instructions
    # ======================================================================
    if self.prac.verbose > 0:
        print(colorize('Parsing instruction: "%s"', (None, 'white', True), True) % instr)
    dbs = self.parse([instr])

    # ----------------------------------------------------------------------
    # here come some dirty hacks to catch some very frequent and
    # annoying parsing errors:
    # 1. "season" is consistently tagged as a noun; we retag it as a verb.
    for db in dbs:
        for q in db.query('has_pos(?w,NN)'):
            if q['?w'].lower().startswith('season'):
                db['has_pos(%s,NN)' % q['?w']] = 0
                db['has_pos(%s,VB)' % q['?w']] = 1

    pngs = {}
    for i, db in enumerate(dbs):
        infstep.outdbs.append(db)
        if self.prac.verbose > 1:
            print()
            print(colorize('Syntactic evidence:', (None, 'white', True), True))
            db.write(sys.stdout, True)
            print()
        pngs['NL Parsing - ' + str(i)] = get_cond_prob_png(
            ','.join([x.name for x in self.mln.predicates[:10]]) + ',...',
            str(node.instr), filename=self.name)
    infstep.png = pngs
    yield node
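
# --- Illustrative sketch (not part of the module) ---------------------------
# The retagging hack above flips the truth value of two ground atoms in the
# parser output database. A minimal stand-alone illustration of the same idea
# on a plain dict mapping atom strings to truth values (names and the helper
# below are hypothetical):
def retag_season(evidence):
    """Retag every word starting with 'season' from noun (NN) to verb (VB)."""
    updated = dict(evidence)
    for atom, truth in evidence.items():
        if atom.startswith('has_pos(') and atom.endswith(',NN)') and truth == 1:
            word = atom[len('has_pos('):-len(',NN)')]
            if word.lower().startswith('season'):
                updated['has_pos(%s,NN)' % word] = 0
                updated['has_pos(%s,VB)' % word] = 1
    return updated

# retag_season({'has_pos(season-2,NN)': 1})
# -> {'has_pos(season-2,NN)': 0, 'has_pos(season-2,VB)': 1}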
def __call__(self, node, **params):
    # ======================================================================
    # Initialization
    # ======================================================================
    dbs = node.outdbs
    infstep = PRACInferenceStep(node, self)
    infstep.indbs = [db.copy() for db in dbs]
    infstep.outdbs = [db.copy() for db in dbs]

    logger.debug('Running {}'.format(self.name))
    if self.prac.verbose > 0:
        print(prac_heading('Generating CRAM Plan(s)'))

    if not hasattr(self.prac.actioncores[node.frame.actioncore], 'plan'):
        raise ActionKnowledgeError('I don\'t know how to %s' % node.frame.sentence)
        yield  # unreachable; keeps this method a generator

    ac = self.prac.actioncores[node.frame.actioncore]
    # fill the parameter dictionary with all inferred roles...
    acdict = dict([(k, v.type) for k, v in list(node.frame.actionroles.items())])
    # ...and their properties
    acdict.update(
        dict([('{}_props'.format(k),
               ' '.join(['({} {})'.format(pkey, pval)
                         for pkey, pval in list(v.props.tojson().items())]))
              for k, v in list(node.frame.actionroles.items())]))
    # update dictionary with missing roles and role properties
    for role in ac.roles:
        if acdict.get(role) is None:
            acdict[role] = 'Unknown'
            acdict['{}_props'.format(role)] = ''

    node.plan = ac.parameterize_plan(**acdict)

    if self.prac.verbose:
        print()
        print(prac_heading('PLAN GENERATION RESULTS'))
        print(colorize('actioncore:', (None, 'white', True), True),
              colorize(ac.name, (None, 'cyan', True), True))
        print(colorize('assignments:', (None, 'white', True), True))
        for x in acdict:
            print('\t{}: {}'.format(colorize(x, (None, 'white', True), True),
                                    colorize(acdict[x], (None, 'cyan', True), True)))
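
# --- Illustrative sketch (not part of the module) ---------------------------
# A minimal example (hypothetical values) of the parameter dictionary built
# above: every action role maps to its inferred concept, '<role>_props' holds
# a space-separated list of '(property value)' pairs, and missing roles fall
# back to 'Unknown' with empty properties.
acdict_example = {
    'action_verb': 'flip.v.08',
    'action_verb_props': '',
    'theme': 'pancake.n.01',
    'theme_props': '(color yellow) (shape round)',
    'instrument': 'Unknown',
    'instrument_props': '',
}
# These keyword arguments are then handed to the action core's plan template,
# i.e. something along the lines of ac.parameterize_plan(**acdict_example).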
def __call__(self, node, **params):
    # ======================================================================
    # Initialization
    # ======================================================================
    logger.debug('inference on {}'.format(self.name))

    if self.prac.verbose > 0:
        print(prac_heading('Role COMPLETION: %s' % node.frame.actioncore))

    dbs = node.outdbs
    infstep = PRACInferenceStep(node, self)
    infstep.executable_plans = []

    pngs = {}
    for i, db in enumerate(dbs):
        # ==================================================================
        # Mongo Lookup
        # ==================================================================
        infstep.indbs.append(db.copy())
        db_, missingroles = self.determine_missing_roles(node, db)

        if self.prac.verbose > 1:
            print()
            print(prac_heading('ROLE COMPLETION RESULTS'))
            for m in missingroles:
                r = node.frame.actionroles.get(m)
                if r:
                    print(m, r.type)

        # ==================================================================
        # Postprocessing
        # ==================================================================
        infstep.outdbs.append(db_)

        for word, actioncore in db.actioncores():
            pngs['LookUp - ' + str(i)] = get_query_png(list(missingroles), dbs,
                                                       filename=self.name,
                                                       skolemword=word)
    infstep.png = pngs
    infstep.applied_settings = {'module': 'missing_roles', 'method': 'DB lookup'}
    return [node]
def infer(self, pracinference):
    inf_step = PRACInferenceStep(pracinference, self)
    for db in pracinference.get_inference_steps_of_module('nl_parsing').output_dbs:
        database = Database(self.prac.mln)
        for truth, gndLit in db.iterGroundLiteralStrings():
            database << (gndLit, truth)
            logger.info(gndLit)
        logger.info('Adding all similarities...')
        self.addPossibleWordSensesToDBs(database)
        inf_step.output_dbs.append(database)
    return inf_step
def __call__(self, pracinference, **params):
    print(prac_heading('Word Sense Disambiguation'))

    if params.get('kb', None) is None:
        # load the default arguments
        dbs = pracinference.inference_steps[-1].output_dbs
        kb = self.load_prac_kb('default')
        kb.dbs = dbs
    else:
        kb = params['kb']
    if not hasattr(kb, 'dbs'):
        kb.dbs = pracinference.inference_steps[-1].output_dbs
    mln = kb.query_mln
    mln.write()
    logic = kb.query_params['logic']
    fol = logic == 'FirstOrderLogic'
    known_concepts = mln.domains.get('concept', [])
    inf_step = PRACInferenceStep(pracinference, self)
    wordnet_module = self.prac.module('wn_senses')

    for db in kb.dbs:
        db = wordnet_module.get_senses_and_similarities(db, known_concepts)
        result_db = list(kb.infer(db))
        inf_step.output_dbs.extend(result_db)
        print()
        for r_db in result_db:
            print(prac_heading('Inferred most probable word senses'))
            for q in r_db.query('has_sense(?w, ?s)'):
                if q['?s'] == 'null':
                    continue
                print('{}:'.format(q['?w']))
                wordnet_module.printWordSenses(
                    wordnet_module.get_possible_meanings_of_word(r_db, q['?w']),
                    q['?s'])
                print()
    return inf_step
def __call__(self, pracinference, **params):
    logger.info('Running {}'.format(self.name))
    print(prac_heading('Recognizing Objects'))

    # load default project
    projectpath = os.path.join(pracloc.pracmodules, self.name, self.defproject)
    project = MLNProject.open(projectpath)

    inf_step = PRACInferenceStep(pracinference, self)
    dbs = pracinference.inference_steps[-1].output_dbs

    mlntext = project.mlns.get(project.queryconf['mln'], None)
    mln = parse_mln(mlntext,
                    searchpaths=[self.module_path],
                    projectpath=projectpath,
                    logic=project.queryconf.get('logic', 'FuzzyLogic'),
                    grammar=project.queryconf.get('grammar', 'PRACGrammar'))
    wordnet_module = self.prac.module('wn_senses')

    # adding evidence properties to new query db
    for db in dbs:
        # find properties and add word similarities
        logger.error(db.domains)
        logger.error(mln.domains)
        output_db = wordnet_module.add_similarities(db, mln)
        output_db.write()

        # infer and update output dbs
        infer = self.mlnquery(config=project.queryconf, db=output_db, mln=mln)
        result_db = infer.resultdb
        inf_step.outdbs.append(result_db)
    return inf_step
def __call__(self, node, **params):
    # ======================================================================
    # Initialization
    # ======================================================================
    logger.debug('inference on {}'.format(self.name))

    if self.prac.verbose > 0:
        print(prac_heading('Resolving Coreferences'))

    preds = list(node.rdfs(goaltest=lambda n: isinstance(n, FrameNode) and not n.children,
                           all=True))[:2]
    dbs = node.outdbs
    infstep = PRACInferenceStep(node, self)
    projectpath = os.path.join(pracloc.pracmodules, self.name)
    ac = None
    pngs = {}
    # if not preds: return []

    # ======================================================================
    # Preprocessing
    # ======================================================================

    # merge output dbs from senses_and_roles step, containing
    # roles inferred from multiple sentences.
    if not preds:
        # no coreferencing required - forward dbs and settings
        # from previous module
        infstep.indbs = [db.copy() for db in dbs]
        infstep.outdbs = [db.copy() for db in infstep.indbs]
        logger.debug('%s has no predecessors. Nothing to do here. Passing db...' % node)
        return [node]

    # retrieve all words from the dbs to calculate distances.
    # Do not use pracinference.instructions as they are not
    # annotated by the Stanford parser.
    sentences = [db.words() for pred in preds for db in pred.indbs]
    infstep.indbs = [db.copy() for db in dbs]
    # infstep.outdbs = [db.copy() for db in infstep.indbs]

    # query action core to load corresponding project
    actioncore = node.frame.actioncore

    # clear corefdb and unify current db with the two preceding ones
    corefdb = PRACDatabase(self.prac)
    corefdb = corefdb.union(dbs, self.prac.mln)
    for pred in preds:
        logger.debug('unifying with %s' % pred)
        for db in pred.indbs:
            corefdb = corefdb.union(db, self.prac.mln)

    # remove all senses from the databases' domain that are not
    # assigned to any word.
    for q in corefdb.query('!(EXIST ?w (has_sense(?w,?sense)))'):
        corefdb.rmval('sense', q['?sense'])

    try:
        # preprocessing: adding distance information for each
        # word in the instructions
        for sidx, s in enumerate(sentences):
            for w in s:
                cont = True
                for q in corefdb.query('distance({}, ?w)'.format(w)):
                    cont = False
                    break
                if not cont:
                    continue
                corefdb << 'distance({},DIST{})'.format(w, sidx)

        logger.debug('loading Project: {}'.format(
            colorize(actioncore, (None, 'cyan', True), True)))
        project = MLNProject.open(os.path.join(projectpath, '{}.pracmln'.format(actioncore)))
        mlntext = project.mlns.get(project.queryconf['mln'], None)
        mln = parse_mln(mlntext,
                        searchpaths=[self.module_path],
                        projectpath=projectpath,
                        logic=project.queryconf.get('logic', 'FuzzyLogic'),
                        grammar=project.queryconf.get('grammar', 'PRACGrammar'))
    except MLNParsingError:
        logger.warning('Could not use MLN in project {} for coreference resolution'.format(
            colorize(actioncore, (None, 'cyan', True), True)))
        infstep.outdbs = [db.copy(self.prac.mln) for db in dbs]
        infstep.png = node.parent.laststep.png
        infstep.applied_settings = node.parent.laststep.applied_settings
        return [node]
    except Exception:
        infstep.outdbs = [db.copy(self.prac.mln) for db in dbs]
        infstep.png = node.parent.laststep.png
        infstep.applied_settings = node.parent.laststep.applied_settings
        logger.warning('Could not load project "{}". Passing dbs to next module...'.format(ac))
        return [node]

    # adding similarities
    wnmod = self.prac.module('wn_senses')
    newdatabase = wnmod.add_sims(corefdb, mln)

    # update queries depending on missing roles
    acroles = [role for role in self.prac.actioncores[actioncore].roles
               if role != 'action_verb']
    missingroles = [ar for ar in acroles
                    if len(list(newdatabase.query('{}(?w,{})'.format(ar, actioncore)))) == 0]
    conf = project.queryconf
    conf.update({'queries': ','.join(missingroles)})
    print(colorize('querying for missing roles {}'.format(conf['queries']),
                   (None, 'green', True), True))

    # asserting impossible role-ac combinations, leaving previously
    # inferred roles untouched
    fulldom = mergedom(mln.domains, newdatabase.domains)
    ac_domains = [dom for dom in fulldom if '_ac' in dom]
    acs = list(set([v for a in ac_domains for v in fulldom[a]]))
    acs = [ac_ for ac_ in acs if ac_ != actioncore]

    for ac1 in acs:
        for r in missingroles:
            for w in newdatabase.domains['word']:
                # words with no sense are asserted false
                if list(corefdb.query('!(EXIST ?sense (has_sense({},?sense)))'.format(w))):
                    newdatabase << '!{}({},{})'.format(r, w, actioncore)
                # leave previously inferred role information untouched
                if list(newdatabase.query('{}({},{})'.format(r, w, ac1))):
                    continue
                else:
                    newdatabase << '!{}({},{})'.format(r, w, ac1)

    try:
        # ==================================================================
        # Inference
        # ==================================================================
        infer = self.mlnquery(config=conf,
                              verbose=self.prac.verbose > 2,
                              db=newdatabase,
                              mln=mln)
        if self.prac.verbose == 2:
            print()
            print(prac_heading('INFERENCE RESULTS'))
            infer.write()

        # ==================================================================
        # Postprocessing
        # ==================================================================
        # merge initial db with results
        for db in infstep.indbs:
            resultdb = db.copy()
            for res in infer.results.keys():
                if infer.results[res] != 1.0:
                    continue
                resultdb << str(res)
                _, _, args = self.prac.mln.logic.parse_literal(res)
                w = args[0]
                for q in newdatabase.query('has_sense({0},?s) ^ has_pos({0},?pos)'.format(w)):
                    resultdb << 'has_sense({},{})'.format(w, q['?s'])
                    resultdb << 'is_a({0},{0})'.format(q['?s'])
                    resultdb << 'has_pos({},{})'.format(w, q['?pos'])
            resultdb = wnmod.add_sims(resultdb, mln)
            # enhance the frame data
            for mrole in missingroles:
                for q in resultdb.query('{role}(?w, {actioncore}) ^ has_sense(?w, ?s)'.format(
                        role=mrole, actioncore=actioncore)):
                    for p in preds:
                        if p.frame.object(q['?w']) is not None:
                            node.frame.actionroles[mrole] = p.frame.object(q['?w'])
                            break
            infstep.outdbs.append(resultdb)
        pprint(node.frame.tojson())
    except NoConstraintsError:
        logger.debug('No coreferences found. Passing db...')
        infstep.outdbs.append(db)
    except Exception:
        logger.error('Something went wrong')
        traceback.print_exc()

    pngs['Coref - ' + str(node)] = get_cond_prob_png(project.queryconf.get('queries', ''),
                                                     dbs, filename=self.name)
    infstep.png = pngs
    infstep.applied_settings = project.queryconf.config
    return [node]
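
# --- Illustrative sketch (not part of the module) ---------------------------
# The coreference preprocessing above tags every word of the preceding
# sentences with a distance atom so the MLN can prefer nearby antecedents.
# A minimal stand-alone sketch of the atom generation (toy word ids, and the
# helper name is hypothetical):
def distance_atoms(sentences):
    """Yield one 'distance(<word>,DIST<i>)' atom per word and sentence index."""
    for sidx, words in enumerate(sentences):
        for w in words:
            yield 'distance({},DIST{})'.format(w, sidx)

# list(distance_atoms([['flip-1', 'pancake-2'], ['it-1']]))
# -> ['distance(flip-1,DIST0)', 'distance(pancake-2,DIST0)', 'distance(it-1,DIST1)']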
def __call__(self, node, **params):
    # ======================================================================
    # Initialization
    # ======================================================================
    logger.debug('inference on {}'.format(self.name))

    if self.prac.verbose > 0:
        print(prac_heading('Recognizing Action Cores'))

    if params.get('project', None) is None:
        # load default project
        projectpath = os.path.join(pracloc.pracmodules, self.name, self.defproject)
        ac_project = MLNProject.open(projectpath)
    else:
        logger.info(colorize('Loading Project from params', (None, 'cyan', True), True))
        projectpath = os.path.join(params.get('projectpath', None) or
                                   os.path.join(pracloc.pracmodules, self.name),
                                   params.get('project').name)
        ac_project = params.get('project')

    dbs = node.outdbs

    mlntext = ac_project.mlns.get(ac_project.queryconf['mln'], None)
    mln = parse_mln(mlntext,
                    searchpaths=[self.module_path],
                    projectpath=projectpath,
                    logic=ac_project.queryconf.get('logic', 'FirstOrderLogic'),
                    grammar=ac_project.queryconf.get('grammar', 'PRACGrammar'))
    known_concepts = mln.domains.get('concept', [])
    infstep = PRACInferenceStep(node, self)
    wnmod = self.prac.module('wn_senses')

    pngs = {}
    nlinstr = node.nlinstr()
    sidx = nlinstr.idx()
    sentence = nlinstr.instr

    for db_ in dbs:
        # ==================================================================
        # Preprocessing
        # ==================================================================
        db = wnmod.get_senses_and_similarities(db_, known_concepts)
        tmp_union_db = db.union(db_, mln=self.prac.mln)
        infstep.indbs.append(tmp_union_db)

        # ==================================================================
        # Inference
        # ==================================================================
        infer = self.mlnquery(config=ac_project.queryconf,
                              verbose=self.prac.verbose > 2,
                              db=tmp_union_db,
                              mln=mln)
        resultdb = infer.resultdb

        if self.prac.verbose == 2:
            print()
            print(prac_heading('INFERENCE RESULTS'))
            infer.write()

        # ==================================================================
        # Postprocessing
        # ==================================================================
        unified_db = resultdb.union(tmp_union_db, mln=self.prac.mln)
        infstep.outdbs.extend(
            self.extract_multiple_action_cores(self.prac, unified_db, wnmod, known_concepts))
        pngs[unified_db.domains.get('actioncore', [None])[0]] = get_cond_prob_png(
            ac_project.queryconf.get('queries', ''), dbs, filename=self.name)

    infstep.png = pngs
    infstep.applied_settings = ac_project.queryconf.config

    pred = None
    for outdb in infstep.outdbs:
        for frame in node.pracinfer.buildframes(outdb, sidx, sentence):
            node_ = FrameNode(node.pracinfer, frame, node, pred,
                              indbs=[outdb], prevmod=self.name)
            pred = node_
            yield node_
            break
        else:
            logger.error('no actioncore recognized in %s' % node)
            raise Exception('no actioncore recognized in %s' % node)
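
# --- Illustrative sketch (not part of the module) ---------------------------
# The frame loop above relies on Python's for/else: the else branch only runs
# if the loop finishes without hitting 'break', i.e. buildframes() yielded
# nothing and no action core was recognized. A minimal, self-contained
# illustration of the same control flow (hypothetical helper name):
def first_or_error(frames):
    for frame in frames:
        result = frame
        break                      # corresponds to the 'yield node_; break' above
    else:
        raise Exception('no actioncore recognized')
    return result

# first_or_error(['Flipping'])  -> 'Flipping'
# first_or_error([])            -> raises Exception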
def __call__(self, node, **params):
    # ======================================================================
    # Initialization
    # ======================================================================
    logger.debug('inference on {}'.format(self.name))

    if self.prac.verbose > 0:
        print(prac_heading('Refining Actioncores'))

    dbs = node.outdbs
    infstep = PRACInferenceStep(node, self)
    # if node.previous_module == 'achieved_by':
    #     raise ActionKnowledgeError('I don\'t know how to %s' % node.frame.sentence)

    # ======================================================================
    # Preprocessing
    # ======================================================================
    for olddb in dbs:
        infstep.indbs.append(olddb.copy())

    # To handle multiple action cores in one task, we have to check whether
    # the single dbs contain achieved_by atoms that already represent plans.
    pngs = {}
    actioncore = node.frame.actioncore
    mod = self.prac.module('complex_achieved_by')
    newnodes = list(mod(node))
    n = None
    parentframes = [p.frame for p in node.parentspath() if isinstance(p, FrameNode)]
    if any(n.frame in parentframes for n in newnodes):
        logger.error('aborting reasoning because of infinite loop. (%s)' % node.frame)
        node.children = []
    else:
        for n in newnodes:
            yield n
    if n is not None:
        return
    if n is None:
        # This is meant to avoid an infinite loop during the achieved_by
        # inference: every pracmln should be used only once during the
        # process, because the evidence for the inference always remains
        # the same. So if a pracmln did not infer a plan the first time,
        # it never will.

        # Remove possible achieved_by predicates left over from previous
        # achieved_by inferences.
        db_ = PRACDatabase(self.prac)
        for atom, truth in sorted(olddb.evidence.items()):
            if 'achieved_by' in atom:
                continue
            db_ << (atom, truth)

        if params.get('project', None) is None:
            logger.debug('Loading Project: {}.pracmln'.format(
                colorize(actioncore, (None, 'cyan', True), True)))
            projectpath = os.path.join(pracloc.pracmodules, self.name,
                                       '{}.pracmln'.format(actioncore))
            if os.path.exists(projectpath):
                project = MLNProject.open(projectpath)
            else:
                infstep.outdbs.append(olddb)
                logger.error(actioncore + ".pracmln does not exist.")
                return
        else:
            logger.debug(colorize('Loading Project from params', (None, 'cyan', True), True))
            projectpath = os.path.join(params.get('projectpath', None) or
                                       os.path.join(pracloc.pracmodules, self.name),
                                       params.get('project').name)
            project = params.get('project')

        mlntext = project.mlns.get(project.queryconf['mln'], None)
        mln = parse_mln(mlntext,
                        searchpaths=[self.module_path],
                        projectpath=projectpath,
                        logic=project.queryconf.get('logic', 'FirstOrderLogic'),
                        grammar=project.queryconf.get('grammar', 'PRACGrammar'))
        known_concepts = mln.domains.get('concept', [])
        wnmod = self.prac.module('wn_senses')

        # Merge domains of db and given mln to avoid errors due to role
        # inference and the resulting missing fuzzy predicates.
        known_concepts = list(set(known_concepts).union(set(db_.domains.get('concept', []))))
        db = wnmod.get_senses_and_similarities(db_, known_concepts)
        unified_db = db_.union(db)
        dbnew = wnmod.add_sims(unified_db, unified_db)

        # add evidence for the achieved_by predicate
        db_ = self.extendDBWithAchievedByEvidence(dbnew, mln, actioncore)

        # ==================================================================
        # Inference
        # ==================================================================
        try:
            infer = self.mlnquery(config=project.queryconf,
                                  verbose=self.prac.verbose > 2,
                                  db=db_,
                                  mln=mln)
        except NoConstraintsError:
            logger.error('achieved_by inference failed due to NoConstraintsError: %s'
                         % node.frame)
            return
        result_db = infer.resultdb

        if self.prac.verbose == 2:
            print()
            print(prac_heading('INFERENCE RESULTS'))
            infer.write()

        # ==================================================================
        # Postprocessing
        # ==================================================================
        # only add inferred achieved_by atoms, leave out 0-evidence atoms
        for qa in result_db.query('achieved_by(?ac1,?ac2)'):
            if qa['?ac2'] == 'Complex':
                continue
            unified_db << 'achieved_by({},{})'.format(qa['?ac1'], qa['?ac2'])
            pngs[qa['?ac2']] = get_cond_prob_png(project.queryconf.get('queries', ''),
                                                 dbs, filename=self.name)
            newframe = Frame(self.prac, node.frame.sidx, '', words=[], syntax=[],
                             actioncore=qa['?ac2'], actionroles={})
            infstep.outdbs.append(unified_db)
            yield FrameNode(node.pracinfer, newframe, node, pred=None,
                            indbs=[unified_db], prevmod=self.name)
            return
        infstep.outdbs.append(unified_db)
        # raise ActionKnowledgeError('I don\'t know how to %s' % node.frame.sentence)
def __call__(self, node, **params):
    # ======================================================================
    # Initialization
    # ======================================================================
    logger.debug('inference on {}'.format(self.name))

    if self.prac.verbose > 0:
        print(prac_heading('Update roles based on Action Core Refinement'))

    dbs = node.outdbs
    infstep = PRACInferenceStep(node, self)

    pngs = {}
    for i, db_ in enumerate(dbs):
        # ==================================================================
        # Preprocessing
        # ==================================================================
        actioncore = node.frame.actioncore
        logger.debug('Action core: {}'.format(actioncore))
        if params.get('project', None) is None:
            logger.debug('Loading Project: {}.pracmln'.format(
                colorize(actioncore, (None, 'cyan', True), True)))
            projectpath = os.path.join(pracloc.pracmodules, self.name,
                                       '{}Transformation.pracmln'.format(actioncore))
            project = MLNProject.open(projectpath)
        else:
            logger.debug(colorize('Loading Project from params', (None, 'cyan', True), True))
            projectpath = os.path.join(params.get('projectpath', None) or
                                       os.path.join(pracloc.pracmodules, self.name),
                                       params.get('project').name)
            project = params.get('project')

        mlntext = project.mlns.get(project.queryconf['mln'], None)
        mln = parse_mln(mlntext,
                        searchpaths=[self.module_path],
                        projectpath=projectpath,
                        logic=project.queryconf.get('logic', 'FirstOrderLogic'),
                        grammar=project.queryconf.get('grammar', 'PRACGrammar'))

        result_db = None
        for pdb in node.parent.outdbs:
            db = pdb.copy()
            db = db.union(db_)
            objs = {o.id for o in node.parent.frame.actionroles.values()}
            for w in set(db.domains['word']):
                if w not in objs:
                    db.rmval('word', w)
            infstep.indbs.append(db)
            ac = node.parent.frame.actioncore
            db << 'achieved_by(%s, %s)' % (ac, actioncore)
            for role, object_ in node.parent.frame.actionroles.items():
                db << '%s(%s, %s)' % (role, object_.id, ac)
            try:
                # ==============================================================
                # Inference
                # ==============================================================
                infer = self.mlnquery(config=project.queryconf,
                                      db=db,
                                      verbose=self.prac.verbose > 2,
                                      mln=mln)
                result_db = infer.resultdb
                if self.prac.verbose == 2:
                    print()
                    print(prac_heading('INFERENCE RESULTS'))
                    print()
                    infer.write()
            except NoConstraintsError:
                logger.error('no constraints in role transformation: %s -> %s'
                             % (node.parent.frame, node.frame))
                result_db = db

            # ==============================================================
            # Postprocessing
            # ==============================================================
            r_db = PRACDatabase(self.prac)
            roles = self.prac.actioncores[actioncore].roles
            for atom, truth in sorted(result_db.evidence.items()):
                if any(r in atom for r in roles):
                    _, predname, args = self.prac.mln.logic.parse_literal(atom)
                    word, ac = args
                    if ac == actioncore:
                        r_db << (atom, truth)
                        if truth:
                            sense = pdb.sense(word)
                            props = pdb.properties(word)
                            obj = Object(self.prac, id_=word, type_=sense, props=props,
                                         syntax=node.pracinfer.buildword(pdb, word))
                            node.frame.actionroles[predname] = obj

            unified_db = db.union(r_db, mln=self.prac.mln)
            r_db_ = PRACDatabase(self.prac)
            # It is assumed that there is only one true action_core
            # predicate per database.
            # for actionverb, actioncore in unified_db.actioncores(): break
            for atom, truth in sorted(unified_db.evidence.items()):
                if 'action_core' in atom:
                    continue
                r_db_ << (atom, truth)
            infstep.outdbs.append(r_db_)

        pngs['RolesTransformation - ' + str(i)] = get_cond_prob_png(
            project.queryconf.get('queries', ''), dbs, filename=self.name)
        infstep.png = pngs
    infstep.applied_settings = project.queryconf.config
    return [node]
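
# --- Illustrative sketch (not part of the module) ---------------------------
# The postprocessing above keeps only those role atoms whose last argument is
# the refined action core. A stand-alone sketch of that filter with a naive
# string-based literal parser (hypothetical helper; the module itself uses
# self.prac.mln.logic.parse_literal):
def role_atoms_for(evidence, roles, actioncore):
    kept = {}
    for atom, truth in evidence.items():
        if not any(atom.startswith(r + '(') for r in roles):
            continue
        args = atom[atom.index('(') + 1:-1].replace(' ', '').split(',')
        if args[-1] == actioncore:
            kept[atom] = truth
    return kept

# role_atoms_for({'goal(pancake-2,FlippingSomething)': 1,
#                 'goal(pancake-2,Flipping)': 0},
#                roles=['goal'], actioncore='FlippingSomething')
# -> {'goal(pancake-2,FlippingSomething)': 1}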
def __call__(self, pracinference, **params):
    # ======================================================================
    # Initialization
    # ======================================================================
    logger.debug('inference on {}'.format(self.name))

    if self.prac.verbose > 0:
        print(prac_heading('Recognizing Control Structures'))

    if params.get('project', None) is None:
        # load default project
        projectpath = os.path.join(pracloc.pracmodules, self.name, self.defproject)
        ac_project = MLNProject.open(projectpath)
    else:
        logger.info(colorize('Loading Project from params', (None, 'cyan', True), True))
        projectpath = os.path.join(params.get('projectpath', None) or
                                   os.path.join(pracloc.pracmodules, self.name),
                                   params.get('project').name)
        ac_project = params.get('project')

    dbs = pracinference.inference_steps[-1].output_dbs

    mlntext = ac_project.mlns.get(ac_project.queryconf['mln'], None)
    mln = parse_mln(mlntext,
                    searchpaths=[self.module_path],
                    projectpath=projectpath,
                    logic=ac_project.queryconf.get('logic', 'FirstOrderLogic'),
                    grammar=ac_project.queryconf.get('grammar', 'PRACGrammar'))

    inf_step = PRACInferenceStep(pracinference, self)

    pngs = {}
    for i, db in enumerate(dbs):
        db_ = db.copy()

        # ==================================================================
        # Inference
        # ==================================================================
        infer = self.mlnquery(config=ac_project.queryconf, db=db, mln=mln)
        result_db = infer.resultdb

        if self.prac.verbose == 2:
            print()
            print(prac_heading('INFERENCE RESULTS'))
            infer.write()

        # ==================================================================
        # Postprocessing
        # ==================================================================
        for q in result_db.query('event(?w,?ac)'):
            db_ << 'event({},{})'.format(q['?w'], q['?ac'])
        for q in result_db.query('condition(?w)'):
            db_ << 'condition({})'.format(q['?w'])
        inf_step.output_dbs.append(db_)

        pngs['CS' + str(i)] = get_cond_prob_png(ac_project.queryconf.get('queries', ''),
                                                dbs, filename=self.name)
        inf_step.png = pngs
    inf_step.applied_settings = ac_project.queryconf.config
    return inf_step
def __call__(self, node, **params):
    # ======================================================================
    # Initialization
    # ======================================================================
    logger.debug('inference on {}'.format(self.name))

    if self.prac.verbose > 0:
        print(prac_heading('Property Extraction'))

    if params.get('project', None) is None:
        # load default project
        projectpath = os.path.join(pracloc.pracmodules, self.name, self.defproject)
        project = MLNProject.open(projectpath)
    else:
        # load project from params
        projectpath = os.path.join(params.get('projectpath', None) or
                                   os.path.join(pracloc.pracmodules, self.name),
                                   params.get('project').name)
        project = params.get('project')

    dbs = node.outdbs
    infstep = PRACInferenceStep(node, self)

    mlntext = project.mlns.get(project.queryconf['mln'], None)
    mln = parse_mln(mlntext,
                    searchpaths=[self.module_path],
                    projectpath=projectpath,
                    logic=project.queryconf.get('logic', 'FuzzyLogic'),
                    grammar=project.queryconf.get('grammar', 'PRACGrammar'))
    wnmod = self.prac.module('wn_senses')

    pngs = {}
    for i, db in enumerate(dbs):
        # ==================================================================
        # Preprocessing
        # ==================================================================
        db_ = wnmod.add_sims(db, mln)
        infstep.indbs.append(db_)

        try:
            # ==============================================================
            # Inference
            # ==============================================================
            infer = self.mlnquery(config=project.queryconf,
                                  verbose=self.prac.verbose > 2,
                                  db=db_,
                                  mln=mln)
            result_db = infer.resultdb

            if self.prac.verbose == 2:
                print()
                print(prac_heading('INFERENCE RESULTS'))
                print()
                infer.write()

            # ==============================================================
            # Postprocessing
            # ==============================================================
            unified_db = db.copy(self.prac.mln)
            props = [p for p in project.queryconf.get('queries', '').split(',')
                     if p != 'has_sense']
            for p in props:
                for q in result_db.query('{}(?w1,?w2) ^ has_sense(?w2,?s2)'.format(p)):
                    unified_db << '{}({},{})'.format(p, q['?w1'], q['?w2'])
                    unified_db << 'has_sense({},{})'.format(q['?w2'], q['?s2'])
            infstep.outdbs.append(unified_db)
        except NoConstraintsError:
            logger.debug('No properties found. Passing db...')
            infstep.outdbs.append(db.copy())
        except Exception:
            logger.error('Something went wrong')
            traceback.print_exc()

        pngs['PropExtraction - ' + str(i)] = get_cond_prob_png(
            project.queryconf.get('queries', ''), infstep.indbs, filename=self.name)
        infstep.png = pngs
    infstep.applied_settings = project.queryconf.config
    return [node]
def __call__(self, node, **params):
    # ======================================================================
    # Initialization
    # ======================================================================
    logger.debug('inference on {}'.format(self.name))

    if self.prac.verbose > 0:
        print(prac_heading('Recognizing {} Roles'.format(
            {True: 'MISSING', False: 'GIVEN'}[params.get('missing', False)])))

    dbs = node.outdbs
    infstep = PRACInferenceStep(node, self)
    queries = ''
    wnmod = self.prac.module('wn_senses')
    actionroles = defaultdict(list)
    pngs = {}
    for n, olddb in enumerate(dbs):
        db_copy = olddb.copy(mln=self.prac.mln)
        actioncore = node.frame.actioncore
        logger.debug(actioncore)
        if params.get('project', None) is None:
            logger.debug('Loading Project: {}.pracmln'.format(
                colorize(actioncore, (None, 'cyan', True), True)))
            projectpath = os.path.join(pracloc.pracmodules, self.name,
                                       '{}.pracmln'.format(actioncore))
            project = MLNProject.open(projectpath)
        else:
            logger.debug(colorize('Loading Project from params', (None, 'cyan', True), True))
            projectpath = os.path.join(params.get('projectpath', None) or
                                       os.path.join(pracloc.pracmodules, self.name),
                                       params.get('project').name)
            project = params.get('project')

        queries = project.queryconf.get('queries', '')
        mlntext = project.mlns.get(project.queryconf['mln'], None)
        mln = parse_mln(mlntext,
                        searchpaths=[self.module_path],
                        projectpath=projectpath,
                        logic=project.queryconf.get('logic', 'FuzzyLogic'),
                        grammar=project.queryconf.get('grammar', 'PRACGrammar'))
        known_concepts = mln.domains.get('concept', [])

        # ==================================================================
        # Preprocessing
        # ==================================================================
        # adding senses and similarities. might be obsolete as it has
        # already been performed in ac recognition
        logger.debug('adding senses. concepts={}'.format(known_concepts))
        db = wnmod.get_senses_and_similarities(db_copy, known_concepts)

        # we need senses and similarities as well as original evidence
        tmp_union_db = db.union(db_copy, mln=self.prac.mln)

        # ignore roles of false action cores
        new_tmp_union_db = tmp_union_db.copy(mln=self.prac.mln)
        roles = self.prac.actioncores[actioncore].roles
        for ac in tmp_union_db.domains['actioncore']:
            if ac == actioncore:
                continue
            for r in roles:
                for w in new_tmp_union_db.words():
                    new_tmp_union_db << ('{}({},{})'.format(r, w, ac), 0)

        infstep.indbs.append(new_tmp_union_db)

        # ==================================================================
        # Inference
        # ==================================================================
        infer = self.mlnquery(config=project.queryconf,
                              verbose=self.prac.verbose > 2,
                              db=new_tmp_union_db,
                              mln=mln)
        resultdb = infer.resultdb

        if self.prac.verbose == 2:
            print()
            print(prac_heading('INFERENCE RESULTS'))
            infer.write()

        # ==================================================================
        # Postprocessing
        # ==================================================================
        # get query roles for the given actioncore and add inference results
        # for them to the final output db. ignore 0-truth results.
        unified_db = new_tmp_union_db.union(resultdb, mln=self.prac.mln)
        # node.frame.actionroles = defaultdict(list)
        for role, word in unified_db.rolesw(actioncore):
            sense = unified_db.sense(word)
            props = dict(unified_db.properties(word))
            obj = Object(self.prac, id_=word, type_=sense, props=props,
                         syntax=node.pracinfer.buildword(unified_db, word))
            actionroles[role].append(obj)

        roles = self.prac.actioncores[actioncore].roles
        new_result = PRACDatabase(self.prac)
        for atom, truth in unified_db.evidence.items():
            if any(r in atom for r in roles):
                (_, predname, args) = self.prac.mln.logic.parse_literal(atom)
                if not args[-1] == actioncore:
                    continue
                new_result << (atom, truth)

        for q in unified_db.query('has_sense(?w, ?s)'):
            # TODO: add additional formulas to avoid the use of null values
            if self.prac.verbose > 1:
                print(colorize('  WORD:', (None, 'white', True), True), q['?w'])
                print(colorize('  SENSE:', (None, 'white', True), True), q['?s'])
                wnmod.printWordSenses(
                    wnmod.get_possible_meanings_of_word(unified_db, q['?w']), q['?s'])
                print()

        infstep.outdbs.append(new_result)

        pngs['Recognizing {} roles - {}'.format('given', str(n))] = get_cond_prob_png(
            queries, infstep.indbs, filename=self.name)
        infstep.png = pngs

    if 'project' not in locals():
        raise Exception('no actioncore in database: %s' % olddb)
    infstep.applied_settings = project.queryconf.config

    newframes = splitd(actionroles)
    pred = None
    for newframe in newframes:
        f = Frame(self.prac, node.frame.sidx, node.frame.sentence,
                  syntax=list(olddb.syntax()),
                  words=node.frame.words,
                  actioncore=node.frame.actioncore,
                  actionroles=newframe)
        logger.debug('created new frame %s' % f)
        pred = FrameNode(node.pracinfer, f, node, pred,
                         indbs=infstep.outdbs, prevmod=self.name)
        yield pred
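
# --- Illustrative sketch (not part of the module) ---------------------------
# splitd() above turns the role -> [candidate objects] map into several
# single-valued role assignments, one new frame per assignment. Assuming it
# enumerates the cartesian product of the candidates (an assumption, not the
# verified semantics of splitd), an equivalent sketch would be:
from itertools import product

def split_assignments(actionroles):
    roles = sorted(actionroles)
    for combo in product(*(actionroles[r] for r in roles)):
        yield dict(zip(roles, combo))

# list(split_assignments({'theme': ['pancake-2', 'spatula-4'], 'goal': ['pan-6']}))
# -> [{'goal': 'pan-6', 'theme': 'pancake-2'},
#     {'goal': 'pan-6', 'theme': 'spatula-4'}]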