Beispiel #1
0
 def learning(self):
     """Run weight learning on this object's MLN/database with several methods."""
     mln = MLN(mlnfile=self.mln_path, grammar='StandardGrammar')
     db = Database(mln, dbfile=self.db_path)
     for learning_method in ('BPLL', 'BPLL_CG', 'CLL'):
         print('=== LEARNING TEST:', learning_method, '===')
         learner = learn(method=learning_method, mln=mln, db=db,
                         verbose=True, multicore=False)
         learner.run()
Beispiel #2
0
def main(arg='.'):
    """Time MC-SAT inference on the smokers example, once with multicore
    enabled and once without, and print both wall-clock timings.

    :param arg: directory containing the learned MLN and the test database
    """
    pth = os.path.join(arg, 'wts.pybpll.smoking-train-smoking.mln')
    mln = MLN(mlnfile=pth, grammar='StandardGrammar')
    pth = os.path.join(arg, 'smoking-test-smaller.db')
    db = Database(mln, dbfile=pth)

    # first pass: multicore inference
    start = time.time()
    query(queries='Cancer,Smokes,Friends',
          method='MC-SAT',
          mln=mln,
          db=db,
          verbose=False,
          multicore=True).run()
    t1 = time.time() - start

    # second pass: single-core inference
    start = time.time()
    query(queries='Cancer,Smokes,Friends',
          method='MC-SAT',
          mln=mln,
          db=db,
          verbose=False,
          multicore=False).run()
    t2 = time.time() - start
    print('Inference, MC-SAT, {}, {}'.format(t1, t2))
Beispiel #3
0
    def train(self, prac_learning):
        """Collect the training databases for learning.

        Prefers an explicit ``prac_learning.training_dbs`` file list when the
        attribute exists and is not None; otherwise falls back to the files
        returned by ``self.prac.training_dbs()``.

        :param prac_learning: learning configuration; may carry a
                              ``training_dbs`` attribute with db file paths
        """
        training_dbs = []
        if hasattr(prac_learning,
                   'training_dbs') and prac_learning.training_dbs is not None:
            for dbfile in prac_learning.training_dbs:
                # NOTE(review): extend() here vs append() below implies one
                # dbfile may load as multiple databases — confirm Database
                # is iterable in this project before relying on it
                training_dbs.extend(
                    Database(self.mln,
                             dbfile=dbfile,
                             ignore_unknown_preds=True))
        else:
            for dbfile in self.prac.training_dbs():
                db = Database(self.mln,
                              dbfile=dbfile,
                              ignore_unknown_preds=True)
                training_dbs.append(db)
Beispiel #4
0
def main(arg='.'):
    """Time single-core exact (EnumerationAsk) inference on the smokers
    example and print the elapsed wall-clock time.

    :param arg: directory containing the learned MLN and the test database
    """
    pth = os.path.join(arg, 'wts.pybpll.smoking-train-smoking.mln')
    mln = MLN(mlnfile=pth, grammar='StandardGrammar')
    pth = os.path.join(arg, 'smoking-test.db')
    db = Database(mln, dbfile=pth)
    start = time.time()
    query(method='EnumerationAsk',
          mln=mln,
          db=db,
          verbose=False,
          multicore=False).run()
    t2 = time.time() - start
    print('exact inference test: {}'.format(t2))
Beispiel #5
0
    def model_config(predicate, formula, database, mln_path,
                     db_path):  # mln_path, db_path are strings
        """Build an MLN from predicate/formula strings and a Database from
        evidence pairs, serializing both to files in the current directory.

        :param predicate: iterable of predicate declaration strings
        :param formula:   iterable of formula strings
        :param database:  sequence of (label, evidence) string pairs
        :param mln_path:  output file name for the MLN
        :param db_path:   output file name for the database
        :return:          tuple (db, mln)
        """
        base_path = os.getcwd()
        mln = MLN(grammar='StandardGrammar', logic='FirstOrderLogic')
        for pred in predicate:
            mln << pred
            print('input predicate successful:' + pred)
        for frm in formula:
            mln << frm
            print('input formula successful :' + frm)
        mln.write()
        # store the predicate/formula data as the <mln_path> file;
        # os.path.join replaces the original hard-coded '\\' so this
        # also works on non-Windows systems
        mln.tofile(os.path.join(base_path, mln_path))
        db = Database(mln)
        try:
            for i in enumerate(database):
                db << i[1][1]
                print('input database successful : ' + i[1][0] + ' : ' +
                      i[1][1])
        except Exception:
            # best-effort fallback (originally a bare except): re-add the
            # remaining evidence entries starting at the one that failed
            for j in database[i[0]::]:
                db << j[1]

        db.write()
        # store the evidence data as the <db_path> file
        db.tofile(os.path.join(base_path, db_path))
        return (db, mln)
Beispiel #6
0
def test_db():
    """Smoke-test Database evidence handling: insert, assign, delete, write."""
    mln = test_mln()
    db = Database(mln)
    db << 'foo(X)'       # positive evidence via the stream operator
    db['bar(Y)'] = .0    # explicit truth value via item assignment
    print('db write:')
    db.write()
    del db['bar(Y)']     # drop the assigned atom again
    print('db write:')
    db.write()
    return mln, db
Beispiel #7
0
def test_learning_smokers():
    """Exercise every learning method on the smokers example in both core modes."""
    project = os.path.join(locs.examples, 'smokers', 'smokers.pracmln')
    mln = MLN(mlnfile=('%s:smoking.mln' % project), grammar='StandardGrammar')
    mln.write()
    db = Database(mln, dbfile='%s:smoking-train.db' % project)
    for learning_method in ('BPLL', 'BPLL_CG', 'CLL'):
        for use_multicore in (True, False):
            print('=== LEARNING TEST:', learning_method, '===')
            learner = learn(method=learning_method, mln=mln, db=db,
                            verbose=True, multicore=use_multicore)
            learner.run()
Beispiel #8
0
def main(arg='.'):
    """Time exact EnumerationAsk inference on the tiny smokers test database."""
    mln_file = os.path.join(arg, 'wts.pybpll.smoking-train-smoking.mln')
    mln = MLN(mlnfile=mln_file, grammar='StandardGrammar')
    db_file = os.path.join(arg, 'smoking-test-tiny.db')
    db = Database(mln, dbfile=db_file)
    start = time.time()
    inference = query(method='EnumerationAsk', mln=mln, db=db,
                      verbose=False, multicore=False)
    inference.run()
    t2 = time.time() - start
    print('exact inference test: {}'.format(t2))
Beispiel #9
0
    def infer(self, pracinference):
        """Copy each NL-parsing output database into a fresh Database, add
        word-sense similarities, and collect the results on a new step."""
        inf_step = PRACInferenceStep(pracinference, self)
        nl_step = pracinference.get_inference_steps_of_module('nl_parsing')
        for db in nl_step.output_dbs:
            database = Database(self.prac.mln)
            # carry over every ground literal with its truth value
            for truth, gndLit in db.iterGroundLiteralStrings():
                database << (gndLit, truth)
                logger.info(gndLit)
            logger.info('Adding all similarities...')
            self.addPossibleWordSensesToDBs(database)
            inf_step.output_dbs.append(database)
        return inf_step
Beispiel #10
0
def test_inference_taxonomies():
    """Run EnumerationAsk and WCSPInference on the taxonomies example."""
    project = os.path.join(locs.examples, 'taxonomies', 'taxonomies.pracmln')
    mln = MLN(mlnfile=('%s:wts.learned.taxonomy.mln' % project),
              grammar='PRACGrammar',
              logic='FuzzyLogic')
    db = Database(mln, dbfile='%s:evidence.db' % project)
    for inference_method in ('EnumerationAsk', 'WCSPInference'):
        print('=== INFERENCE TEST:', inference_method, '===')
        result = query(queries='has_sense, action_role',
                       method=inference_method,
                       mln=mln,
                       db=db,
                       verbose=False,
                       cw=True).run()
        result.write()
Beispiel #11
0
def test_inference_smokers():
    """Run four inference methods on the smokers example in both core modes."""
    project = os.path.join(locs.examples, 'smokers', 'smokers.pracmln')
    mln = MLN(mlnfile=('%s:wts.pybpll.smoking-train-smoking.mln' % project),
              grammar='StandardGrammar')
    db = Database(mln, dbfile='%s:smoking-test-smaller.db' % project)
    methods = ('EnumerationAsk', 'MC-SAT', 'WCSPInference', 'GibbsSampler')
    for inference_method in methods:
        for use_multicore in (False, True):
            print('=== INFERENCE TEST:', inference_method, '===')
            q = query(queries='Cancer,Smokes,Friends',
                      method=inference_method,
                      mln=mln,
                      db=db,
                      verbose=True,
                      multicore=use_multicore)
            q.run()
Beispiel #12
0
def test_inference_smokers():
    """Run the GibbsSampler on the plain smokers example files.

    Expects 'smokers.mln' and 'smokers.db' under the examples directory.
    """
    p = os.path.join(locs.examples, 'smokers', 'smokers')
    print(p)
    mln = MLN(mlnfile=('%s.mln' % p), grammar='StandardGrammar')
    db = Database(mln, dbfile='%s.db' % p)
    for method in ('GibbsSampler', ):
        print('=== INFERENCE TEST:', method, '===')
        query(queries='Cancer,Smokes,Friends',
              method=method,
              mln=mln,
              db=db,
              verbose=True,
              multicore=False).run()
Beispiel #13
0
def main(arg='.'):
    """Time single-core exact (EnumerationAsk) inference on the small
    smokers test database and print the elapsed wall-clock time.

    :param arg: directory containing the learned MLN and the test database
    """
    pth = os.path.join(arg, 'wts.pybpll.smoking-train-smoking.mln')
    mln = MLN(mlnfile=pth, grammar='StandardGrammar')
    pth = os.path.join(arg, 'smoking-test-small.db')
    db = Database(mln, dbfile=pth)
    start = time.time()
    query(method='EnumerationAsk',
          mln=mln,
          db=db,
          verbose=False,
          multicore=False).run()
    t2 = time.time() - start
    print('exact inference test: {}'.format(t2))
Beispiel #14
0
    def get_similarities(self, *dbs):
        '''
        Returns a database containing all possible similarities for the senses
        of the input databases.

        :param dbs: the input databases to be queried
        :return:    a generator yielding an instance of Database
        '''
        wordnet = self.prac.wordnet
        full_domain = mergedom(*[db.domains for db in dbs])
        for db in dbs:
            simdb = Database(self.mln)
            for match in db.query('has_sense(?w, ?s) ^ is_a(?s, ?c)'):
                sense, concept = match['?s'], match['?c']
                # score every concept of the merged domain against this one
                for candidate in full_domain['concept']:
                    score = wordnet.similarity(candidate, concept)
                    simdb << ('is_a({},{})'.format(sense, candidate), score)
            yield simdb
Beispiel #15
0
 def finalgraph(self, filename=None):
     """Render the final inference result as an SVG graph via graphviz.

     Aggregates the evidence of all inference steps into one database,
     then draws action cores, their word senses, role fillers, and
     achieved_by links.

     :param filename: optional output file name forwarded to render_gv
     :return: result of render_gv(g, filename)
     """
     finaldb = Database(self.prac.mln)
     for step in self.inference_steps:
         for db in step.output_dbs:
             for atom, truth in list(db.evidence.items()):
                 if truth == 0: continue
                 _, predname, args = self.prac.mln.logic.parseLiteral(atom)
                 # keep only role atoms and the core structural predicates
                 if predname in self.prac.roles.union(
                     ['has_sense', 'action_core', 'achieved_by']):
                     finaldb << atom
                 #         finaldb.write(sys.stdout, color=True)
     g = Digraph(format='svg', engine='dot')
     g.attr('node', shape='box', style='filled')
     for res in finaldb.query('action_core(?w, ?a) ^ has_sense(?w, ?s)'):
         actioncore = res['?a']
         sense = res['?s']
         predname = 'action_core'
         g.node(actioncore, fillcolor='#bee280')
         g.node(sense)
         g.edge(actioncore, sense, label='is_a')
         roles = self.prac.actioncores[actioncore].roles
         for role in roles:
             # NOTE(review): this queries 'db' (the last database left over
             # from the aggregation loop above), not 'finaldb' — confirm
             # this is intended and not a bug
             for res in db.query('{}(?w, {}) ^ has_sense(?w, ?s)'.format(
                     role, actioncore)):
                 sense = res['?s']
                 g.node(sense)
                 g.edge(actioncore, sense, label=role)
     for res in finaldb.query('achieved_by(?a1, ?a2)'):
         a1 = res['?a1']
         a2 = res['?a2']
         g.node(a1, fillcolor='#bee280')
         g.node(a2, fillcolor='#bee280')
         g.edge(a1, a2, label='achieved_by')
         actioncore = a2
         roles = self.prac.actionroles[actioncore].roles
         for role in roles:
             # NOTE(review): again queries 'db' rather than 'finaldb' — verify
             for res in db.query('{}(?w, {}) ^ has_sense(?w, ?s)'.format(
                     role, actioncore)):
                 sense = res['?s']
                 g.node(sense)
                 g.edge(actioncore, sense, label=role)
     return render_gv(g, filename)
Beispiel #16
0
def main():
    """Sample 100 random three-point configurations from the vessel-traffic
    shapefile, turn each into an evidence database, and learn MLN weights."""
    shp_path = os.path.join('Vessel Traffic Data sub-areas April 2020',
                            'cts_sub-areas_04_2020_pt',
                            'cts_bass_st_04_2020_pt.shp')
    df = gpd.read_file(shp_path)
    mln = MLN()
    populatePredicates(mln)
    populateFormulas(mln)
    mln.write()
    dbs = []
    for _ in range(100):
        sample = df.sample(n=3)
        # pair up longitudes and latitudes into (lon, lat) points
        coords = list(zip(sample['LON'].tolist(), sample['LAT'].tolist()))
        evidence = ThreePointEvidence(*coords)
        db = Database(mln)
        evidence.generateEvidence(db)
        dbs.append(db)
    mln.learn(dbs)
Beispiel #17
0
    def train(self, praclearning):
        """Build similarity-augmented training databases from the PRAC corpus.

        Loads every training .db file, keeps those declaring both an
        'actioncore' and a 'concept' domain, and extends each kept database
        with WUP-similarity is_a evidence against every known concept.

        :param praclearning: PRAC learning context providing the prac object
                             and the training database file list
        """
        prac = praclearning.prac
        # get all the relevant training databases
        db_files = prac.training_dbs()
        # NOTE(review): syntactic_preds is unused, but prac.module() may load
        # the module as a side effect, so the call is kept
        nl_module = prac.module('nl_parsing')
        syntactic_preds = nl_module.mln.predicates
        logger.debug(db_files)
        dbs = [
            x for x in [
                Database(self.mln, dbfile=name, ignore_unknown_preds=True)
                for name in db_files
            ] if type(x) is Database
        ]
        logger.debug(dbs)
        new_dbs = []
        training_dbs = []
        known_concepts = []
        logger.debug(self.mln.domains)
        for db in dbs:
            # only databases declaring both domains are usable for training
            if 'actioncore' not in db.domains: continue
            if 'concept' not in db.domains: continue
            known_concepts.extend(db.domains['concept'])
            new_dbs.append(db)
        wordnet = prac.wordnet
        for db in new_dbs:
            new_db = db.duplicate()
            for sol in db.query('has_sense(?w, ?s) ^ is_a(?s, ?c)'):
                sense = sol['?s']
                concept = sol['?c']
                synset = wordnet.synset(concept)
                for known_concept in known_concepts:
                    known_synset = wordnet.synset(known_concept)
                    # unknown synsets score zero similarity
                    if known_synset is None or synset is None:
                        sim = 0
                    else:
                        sim = wordnet.wup_similarity(synset, known_synset)
                    new_db << ('is_a(%s,%s)' % (sense, known_concept), sim)
            training_dbs.append(new_db)

        # BUG FIX: the original mixed %-style ('%d') with str.format(), which
        # logged the literal text '%d'; use logging's lazy %-interpolation
        logger.info('Starting training with %d databases', len(training_dbs))
Beispiel #18
0
 def inference(self, inference_query):
     """Run EnumerationAsk inference for the given queries and return the
     last result, or an empty list if it is falsy."""
     mln = MLN(mlnfile=self.mln_path, grammar='StandardGrammar')
     db = Database(mln, dbfile=self.db_path)
     methods = [
         'EnumerationAsk'
         # 'MC-SAT',
         # 'WCSPInference',
         # 'GibbsSampler'
     ]
     for method in methods:
         print('=== INFERENCE TEST:', method, '===')
         result = query(queries=inference_query, method=method, mln=mln,
                        db=db, verbose=True, multicore=False).run()
         print(result)
     return result if result else []
Beispiel #19
0
def test_learning_smokers(arg='.', disp=False):
    """Run all learning methods on the smokers example in both core modes.

    :param arg:  directory containing smoking.mln and smoking-train.db
    :param disp: if True, run verbosely and write the learned result
    """
    pth = os.path.join(arg, 'smoking.mln')
    mln = MLN(mlnfile=pth, grammar='StandardGrammar')
    pth = os.path.join(arg, 'smoking-train.db')
    db = Database(mln, dbfile=pth)
    for method in ('BPLL', 'BPLL_CG', 'CLL'):
        for multicore in (True, False):
            print('=== LEARNING TEST:', method, '===')
            # BUG FIX: the disp branch originally called query() (inference)
            # with learning method names like 'BPLL', which cannot work; both
            # branches must run learn()
            result = learn(method=method,
                           mln=mln,
                           db=db,
                           verbose=disp,
                           multicore=multicore).run()
            if disp:
                result.write()
Beispiel #20
0
def test_inference_smokers(arg='.', disp=False):
    """Run several inference methods on the smokers example in both core modes.

    :param arg:  directory with the learned MLN and the test database
    :param disp: if True, run verbosely and write each result after running
    """
    pth = os.path.join(arg, 'wts.pybpll.smoking-train-smoking.mln')
    mln = MLN(mlnfile=pth, grammar='StandardGrammar')
    pth = os.path.join(arg, 'smoking-test-smaller.db')
    db = Database(mln, dbfile=pth)
    for method in ('EnumerationAsk', 'MC-SAT', 'WCSPInference',
                   'GibbsSampler'):
        for multicore in (False, True):
            print('=== INFERENCE TEST:', method, '===')
            # both original branches ran the identical query; only the final
            # .write() depended on disp, so the duplication is folded away
            result = query(queries='Cancer,Smokes,Friends',
                           method=method,
                           mln=mln,
                           db=db,
                           verbose=disp,
                           multicore=multicore).run()
            if disp:
                result.write()