import os

import geopandas as gpd
from pracmln import MLN, Database  # assuming the standard pracmln package layout


def main():
    # Load the vessel-traffic point shapefile for the Bass Strait sub-area.
    path = os.path.join('Vessel Traffic Data sub-areas April 2020',
                        'cts_sub-areas_04_2020_pt',
                        'cts_bass_st_04_2020_pt.shp')
    df = gpd.read_file(path)

    # Build the MLN from the project's predicate and formula definitions
    # (populatePredicates, populateFormulas and ThreePointEvidence are
    # project-local helpers; see the hypothetical sketch below).
    mln = MLN()
    populatePredicates(mln)
    populateFormulas(mln)
    mln.write()

    # Sample 100 random triples of points and turn each triple into an
    # evidence database.
    dbs = []
    for _ in range(100):
        collection = df.sample(n=3)
        lons = collection['LON'].tolist()
        lats = collection['LAT'].tolist()
        points = list(zip(lons, lats))
        evidence = ThreePointEvidence(*points)
        db = Database(mln)
        evidence.generateEvidence(db)
        dbs.append(db)

    # Learn the formula weights from the sampled databases.
    mln.learn(dbs)
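# The helpers used by main() are not defined in this file. The following is a
# minimal, hypothetical sketch of their shape: the Close(point,point)
# predicate, the symmetry formula, and the 0.05-degree threshold are invented
# here for illustration and are not the project's actual definitions.

def populatePredicates(mln):
    # Declare a pairwise closeness relation over a 'point' domain.
    mln << 'Close(point,point)'


def populateFormulas(mln):
    # One learnable formula with an initial weight of 0: closeness is
    # symmetric. Lowercase identifiers are variables in pracmln's grammar.
    mln.formula('Close(x,y) => Close(y,x)', weight=0.0)


class ThreePointEvidence:
    # Wraps three (lon, lat) points and writes pairwise closeness evidence
    # into a pracmln Database.
    def __init__(self, p1, p2, p3):
        self.points = [p1, p2, p3]

    def generateEvidence(self, db):
        names = ['P0', 'P1', 'P2']
        for i in range(3):
            for j in range(i + 1, 3):
                lon1, lat1 = self.points[i]
                lon2, lat2 = self.points[j]
                # Crude planar distance in degrees; a real implementation
                # would use geodesic distance.
                d = ((lon1 - lon2) ** 2 + (lat1 - lat2) ** 2) ** 0.5
                if d < 0.05:
                    db << 'Close(%s,%s)' % (names[i], names[j])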
def test_GSMLN():
    # Scratch experiments: building an MLN programmatically and parsing
    # formulas/predicates by hand with the GSMLN grammar.
    # mln = MLN(grammar='GSMLNGrammar')
    # mln << 'residue(id, profile)'
    # mln << 'partners(id, id)'
    # f = "residue(a, $pa) v residue(b, $pb) => partners(a,b)"
    # # f = "((a(x) ^ b(x)) v (c(x) ^ !(d(x) ^ e(x) ^ g(x)))) => f(x)"
    # # f = "(a(x) v (b(x) ^ c(x))) => f(x)"
    # f = mln.logic.grammar.parse_formula(f)
    # f.print_structure()
    # print(list(f.literals()))
    # g = "partners(id, id)"
    # g = mln.logic.grammar.parse_predicate(g)
    # print(g)
    # print(mln.predicates)

    # Load the smokers example MLN and its evidence database.
    mln = MLN(mlnfile='smokers.mln', grammar='GSMLNGrammar')
    # mln.write()
    # print(mln.predicates)
    dbs = Database.load(mln, dbfiles='smokers.db')
    # dbs[0].write()
    # print(mln.formulas[0].neural)
    # print(mln.formulas[0].cnf())
    # print(mln.nnformulas[0].idx)
    # print(mln.domains)
    # print(dbs[0].domains)
    # mln.formulas[1].print_structure()

    # Grounding experiments:
    # mrf = mln.ground(dbs[0])
    # grounder = DefaultGroundingFactory(mrf, simplify=False, unsatfailure=True,
    #                                    verbose=False, cache=0)
    # for f in grounder.itergroundings():
    #     print(f)
    # print(mrf.gndatoms)

    # The smokers MLN built programmatically with the GSMLN grammar:
    # mln = MLN(grammar='GSMLNGrammar')
    # mln << 'Cancer(&person)'
    # mln << 'Friends(&person,&person)'
    # mln << 'Smokes(&person)'
    # f = 'Smokes($x) => Cancer($x)'
    # g = 'Friends($x,$y) => (Smokes($x) <=> Smokes($y))'
    # print(mln.logic.grammar.parse_formula(f))
    # mln.formula(f)
    # mln.formula(g)
    # print(mln.predicates)
    # print(mln.formulas)
    # mln.formulas[0].print_structure()
    # print(mln.domains)
    # print(mln.formulas[0].cnf())

    # Learn the weights with the GSMLN learning method; this goes through
    # MLN.learn() in base.py.
    learned_mln = mln.learn(databases=dbs, method=GSMLN_L, verbose=True)
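# The test above assumes 'smokers.mln' and 'smokers.db' sit next to this
# script, and that GSMLN_L is imported from the project's learning module
# (its import path is not shown in this file). A minimal sketch of what the
# MLN file might contain, assembled from the GSMLN-grammar snippets commented
# out above; the actual file may differ:
#
#   Cancer(&person)
#   Friends(&person,&person)
#   Smokes(&person)
#
#   0 Smokes($x) => Cancer($x)
#   0 Friends($x,$y) => (Smokes($x) <=> Smokes($y))

if __name__ == '__main__':
    # Pick the experiment to run.
    # main()      # vessel-traffic MLN learning
    test_GSMLN()  # GSMLN smokers weight learning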