def score_mln(mln, role, test_dbs):
    # Number of candidate values for this role, taken from the MLN domain "<role>_d".
    num_queries = len(mln.domains[role + "_d"])
    domain_values = set(deepcopy(mln.domains[role + "_d"]))
    ranks = {}
    for idx in range(len(test_dbs)):
        db = test_dbs[idx]
        instance_ranks = {}
        # Repeatedly run MAP (WCSP) inference, record the predicted value,
        # then clamp that atom to false so the next run yields the runner-up.
        for idq in range(num_queries - 1):
            try:
                wcsp = MLNQuery(queries=role, verbose=False, mln=mln, db=db,
                                method="WCSPInference").run()
                predicted = extract_predicted(mln, wcsp.results)
            except AssertionError as e:
                print(e)
                pdb.set_trace()
            instance_ranks[predicted] = num_queries - idq
            db[role + "(" + predicted + ")"] = 0.0
        # Exactly one domain value should remain unranked; it receives the lowest rank.
        missing = domain_values.difference(set(instance_ranks.keys()))
        try:
            assert len(missing) == 1, "missing queries"
            instance_ranks[list(missing)[0]] = 1
        except AssertionError as e:
            print(e)
            pdb.set_trace()
        ranks[idx] = deepcopy(instance_ranks)
    return ranks
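The function above assumes module-level imports of deepcopy, pdb and MLNQuery, plus a helper extract_predicted that turns the WCSP result atoms into a single predicted value. A hypothetical call, with placeholder file names and role (not taken from the original code), might look like this:

# Hypothetical usage sketch; 'trained.mln', 'test.db' and the 'object' role are placeholders.
from pracmln import MLN, Database

mln = MLN.load(files='trained.mln')         # MLN defining a domain named "object_d"
test_dbs = Database.load(mln, 'test.db')    # list with one Database per test instance
ranks = score_mln(mln, 'object', test_dbs)  # ranks[i] maps each candidate value to its rank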
def mlnquery(self, config=None, verbose=None, **params):
    '''
    Wrapper for MLNQuery that replaces the resultdb of the inference object
    with an MLN Database cast to a PRACDatabase.

    :param config:  the configuration file for the inference
    :param verbose: boolean value whether verbosity logs will be printed or not
    :param params:  dictionary of additional settings
    :return:        the inference object
    '''
    infer = MLNQuery(config=config, verbose=verbose, **params).run()
    pracdb = PRACDatabase(self.prac, db=infer.resultdb)
    infer._resultdb = pracdb
    return infer
from pracmln import MLN
from pracmln import Database
from pracmln import MLNQuery

mln = MLN(mlnfile='./data/smokers/mlns/smoking_trained.mln',
          grammar='PRACGrammar', logic='FirstOrderLogic')
mln.write()

db = Database.load(mln, './data/smokers/dbs/smoking-test.db')[0]
db.write()

print("Running Query...")
result = MLNQuery(mln=mln, db=db).run()
print(result)
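If the individual query probabilities are needed rather than the printed report, the returned inference object exposes a results mapping from ground atoms to their inferred values (the same attribute the other snippets here iterate over); a minimal sketch:

# Minimal sketch: iterate over the ground atoms and their inferred probabilities.
for atom, prob in result.results.items():
    print("%s  %.4f" % (atom, prob))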
from pracmln import MLN, Database, MLNQuery


def test_reasoning():
    mln = MLN.load(files='./mln/alarm.mln')
    db = Database.load(mln, './mln/alarm.db')
    result = MLNQuery(mln=mln, db=db).run()
    result.write()
if ent_type == "r":
    db << "IsRoom(" + ent + ")"
elif ent_type == "l":
    db << "IsLocation(" + ent + ")"
elif ent_type == "o":
    db << "IsObject(" + ent + ")"
elif ent_type == "a":
    db << "IsAction(" + ent + ")"
elif ent_type == "s":
    db << "IsState(" + ent + ")"
else:
    print("Error: Unknown entity type for evidence!")
    exit()

# Add every (head, relation, tail) triple as a ground evidence atom.
for triple_idx in range(triples.shape[0]):
    triple = triples[triple_idx]
    h = i2e[triple[0]]
    r = i2r[triple[1]]
    t = i2e[triple[2]]
    db << r + "(" + h + ", " + t + ")"

# Weight learning for the Markov logic network is commented out below;
# a previously learned network is loaded from disk instead.
start_time = time.time()
# learned_result = MLNLearn(mln=mln, db=db, verbose=True, save=True, method="BPLL_CG",
#                           output_filename="r_learned_weights.mln", multicore=True).run()
# learned_result.tofile("r_learned_weights.mln")
learned_result = MLN.load("learned_weights.mln")
MLNQuery(queries="OperatesOn(sink-l,carrot-o)", verbose=True,
         mln=learned_result, db=db).run()
print(" ---- %s seconds ---- " % (time.time() - start_time))
pdb.set_trace()
fs = open(learndFileName, 'w')
learndMLN.write(stream=fs)
fs.close()

######################################
## testing of mln
######################################
testDB = Database.load(learndMLN, testFileName)
dbpredList = []
dbgtList = []
for db in testDB:
    result = MLNQuery(mln=learndMLN, db=db, method='WCSPInference', multicore=True,
                      queries='object', verbose=True).run()
    ## find the best-scoring prediction for each ground-truth entry
    thisDBObjList = gtList[pIdx]
    for entry in thisDBObjList:
        predObj = entry
        objVal = result.results[entry]
        for k, v in result.results.items():
            if k.find(entry.split(',')[0]) != -1:
                if v > objVal:
                    predObj = k
                    objVal = v  # track the best score seen so far
        predictionList.append(predObj)
        dbpredList.append(predObj)
        groundTruthList.append(entry)