def model_config(self, predicate, formula, database, mln_path, db_path):
    """
    Returns the database and MLN objects in MLN format.

    --Inputs--
    predicate: predicate object with parsed predicates
    formula:   formula object with parsed formulas
    database:  .txt file containing the database(s)
    mln_path:  .mln file name to save the learned weights per formula
    db_path:   .db file to save the progress of the database learning
    """
    base_path = os.getcwd()
    # Parsing with PRACGrammar since we are using clusters
    mln = MLN(grammar='PRACGrammar', logic='FirstOrderLogic')
    for i in predicate:
        mln << i
        print('input predicate successful: ' + i)
    for i in formula:
        mln << i
        print('input formula successful: ' + i)
    mln.write()
    mln.tofile(base_path + '/' + mln_path)
    db = Database.load(mln, database)
    #db.write()
    #db.tofile(base_path + '/' + db_path)
    return (db, mln)
def model_config(predicate, formula, database, mln_path, db_path):
    # mln_path and db_path are strings
    base_path = os.getcwd()
    mln = MLN(grammar='StandardGrammar', logic='FirstOrderLogic')
    for i in predicate:
        mln << i
        print('input predicate successful: ' + i)
    for i in formula:
        mln << i
        print('input formula successful: ' + i)
    mln.write()
    mln.tofile(base_path + '\\' + mln_path)  # save the predicate/formula definitions to the mln_path .mln file
    db = Database(mln)
    try:
        for i in enumerate(database):
            db << i[1][1]
            print('input database successful : ' + i[1][0] + ' : ' + i[1][1])
    except Exception:
        # if an entry fails to parse, keep loading from the failed index onwards
        for j in database[i[0]:]:
            db << j[1]
    db.write()
    db.tofile(base_path + '\\' + db_path)  # save the evidence data to the db_path .db file
    return (db, mln)
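
# A minimal usage sketch for the model_config variant above. The predicate and
# formula strings, evidence pairs, and output file names are hypothetical
# illustrations, not part of the original code; the database argument is assumed
# to be a sequence of (label, evidence_atom) pairs, as the loop above indexes it.
predicates = ['Smokes(person)', 'Friends(person, person)']
formulas = ['0.0 Smokes(?x)', '0.0 Friends(?x, ?y) ^ Smokes(?x) => Smokes(?y)']
evidence = [('d1', 'Smokes(Anna)'), ('d1', 'Friends(Anna, Bob)')]
db, mln = model_config(predicates, formulas, evidence, 'model.mln', 'model.db')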
def test_learning_taxonomies():
    p = '$PRACMLN_HOME/examples/taxonomies/taxonomies.pracmln'
    mln = MLN(mlnfile=('%s:senses_and_roles.mln' % p), grammar='PRACGrammar')
    mln.write()
    dbs = Database.load(mln, dbfiles='%s:training.db' % p)
    for method in ('DPLL', 'DBPLL_CG', 'DCLL'):
        for multicore in (True, False):
            print('=== LEARNING TEST:', method, '===')
            learn(method=method, mln=mln, db=dbs, verbose=True, multicore=multicore,
                  epreds='is_a', discr_preds=EVIDENCE_PREDS).run()
def test_learning_smokers():
    p = '$PRACMLN_HOME/examples/smokers/smokers.pracmln'
    mln = MLN(mlnfile=('%s:smoking.mln' % p), grammar='StandardGrammar')
    mln.write()
    db = Database(mln, dbfile='%s:smoking-train.db' % p)
    for method in ('BPLL', 'BPLL_CG', 'CLL'):
        for multicore in (True, False):
            print('=== LEARNING TEST:', method, '===')
            learn(method=method, mln=mln, db=db, verbose=True, multicore=multicore).run()
def test_learning_smokers():
    p = os.path.join(locs.examples, 'smokers', 'smokers.pracmln')
    mln = MLN(mlnfile=('%s:smoking.mln' % p), grammar='StandardGrammar')
    mln.write()
    db = Database(mln, dbfile='%s:smoking-train.db' % p)
    for method in ('BPLL', 'BPLL_CG', 'CLL'):
        for multicore in (True, False):
            print('=== LEARNING TEST:', method, '===')
            learn(method=method, mln=mln, db=db, verbose=True, multicore=multicore).run()
def test_learning_taxonomies():
    p = os.path.join(locs.examples, 'taxonomies', 'taxonomies.pracmln')
    mln = MLN(mlnfile=('%s:senses_and_roles.mln' % p), grammar='PRACGrammar')
    mln.write()
    dbs = Database.load(mln, dbfiles='%s:training.db' % p)
    for method in ('DPLL', 'DBPLL_CG', 'DCLL'):
        for multicore in (True, False):
            print('=== LEARNING TEST:', method, '===')
            learn(method=method, mln=mln, db=dbs, verbose=True, multicore=multicore,
                  epreds='is_a', discr_preds=EVIDENCE_PREDS).run()
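
# A hedged follow-up to the learning tests above: assuming run() returns the learned
# network, it can be persisted with the same write()/tofile() calls used in
# model_config. The helper name and output file name below are illustrative only.
def learn_and_save(mln, db):
    learned = learn(method='BPLL', mln=mln, db=db, verbose=True, multicore=False).run()
    learned.write()
    learned.tofile(os.path.join(os.getcwd(), 'learned.mln'))
    return learned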
def test_mln():
    mln = MLN()
    mln << 'foo(x)'                     # predicate declaration
    mln << 'bar(y)'                     # another pred declaration
    mln << 'bar(?x) => bar(?y).'        # hard logical constraint
    mln << 'log(.75)/log(.25) foo(?x)'  # weighted formula
    print('mln write:')
    mln.write()
    print('mln predicates:')
    for pred in mln.predicates:
        print(repr(pred))
    print('mln formulas:')
    for f in mln.formulas:
        print(f)
        f.print_structure()
    return mln
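
# A small sketch of persisting and reloading the MLN built in test_mln(), reusing the
# tofile() and MLN(mlnfile=...) calls that already appear in this file; the function
# name and 'test.mln' file name are assumptions for illustration.
def roundtrip_mln():
    mln = test_mln()
    path = os.path.join(os.getcwd(), 'test.mln')
    mln.tofile(path)
    reloaded = MLN(mlnfile=path, grammar='StandardGrammar')
    reloaded.write()
    return reloaded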
def main():
    path = os.path.join('Vessel Traffic Data sub-areas April 2020',
                        'cts_sub-areas_04_2020_pt',
                        'cts_bass_st_04_2020_pt.shp')
    df = gpd.read_file(path)
    mln = MLN()
    populatePredicates(mln)
    populateFormulas(mln)
    mln.write()
    dbs = []
    for i in range(100):
        # sample three vessel positions and turn them into one evidence database
        collection = df.sample(n=3)
        lons = collection['LON'].tolist()
        lats = collection['LAT'].tolist()
        points = list(zip(lons, lats))
        evidence = ThreePointEvidence(*points)
        db = Database(mln)
        evidence.generateEvidence(db)
        dbs.append(db)
    mln.learn(dbs)
from pracmln import MLN
from pracmln import Database
from pracmln import MLNQuery

mln = MLN(mlnfile='./data/smokers/mlns/smoking_trained.mln',
          grammar='PRACGrammar', logic='FirstOrderLogic')
mln.write()
db = Database.load(mln, './data/smokers/dbs/smoking-test.db')[0]
db.write()

print("Running Query...")
result = MLNQuery(mln=mln, db=db).run()
print(result)
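
# A hedged variant of the query above: MLNQuery also accepts parameters such as
# queries and method (per the pracmln documentation); the ground atom and method
# name below are assumptions for illustration, not taken from the original script.
result = MLNQuery(mln=mln, db=db, queries='Smokes(Ann)', method='MC-SAT').run()
print(result)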