Example #1
def test_learning_taxonomies():
    p = '$PRACMLN_HOME/examples/taxonomies/taxonomies.pracmln'
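    # the '<project>.pracmln:<file>' notation addresses a file inside the .pracmln project bundle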
    mln = MLN(mlnfile=('%s:senses_and_roles.mln' % p), grammar='PRACGrammar')
    mln.write()
    dbs = Database.load(mln, dbfiles='%s:training.db' % p)
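    # Database.load() returns a list of databases, one per '---'-separated block in the .db file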
    for method in ('DPLL', 'DBPLL_CG', 'DCLL'):
        for multicore in (True, False):
            print('=== LEARNING TEST:', method, '===')
            learn(method=method, mln=mln, db=dbs, verbose=True,
                  multicore=multicore, epreds='is_a',
                  discr_preds=EVIDENCE_PREDS).run()
Example #2
def test_learning_smokers():
    p = '$PRACMLN_HOME/examples/smokers/smokers.pracmln'
    mln = MLN(mlnfile=('%s:smoking.mln' % p), grammar='StandardGrammar')
    mln.write()
    db = Database(mln, dbfile='%s:smoking-train.db' % p)
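    # the Database() constructor parses a single database; Database.load() returns a list (cf. Example #1)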
    for method in ('BPLL', 'BPLL_CG', 'CLL'):
        for multicore in (True, False):
            print('=== LEARNING TEST:', method, '===')
            learn(method=method, mln=mln, db=db, verbose=True,
                  multicore=multicore).run()
Example #3
def test_GSMLN():
    # mln = MLN(grammar='GSMLNGrammar')
    # mln << 'residue(id, profile)'
    # mln << 'partners(id, id)'

    # f = "residue(a, $pa) v residue(b, $pb) => partners(a,b)"
    # # f = "((a(x) ^ b(x)) v (c(x) ^ !(d(x) ^ e(x) ^ g(x)))) => f(x)"
    # # f = "(a(x) v (b(x) ^ c(x))) => f(x)"
    # f = mln.logic.grammar.parse_formula(f)
    # f.print_structure()
    # print(list(f.literals()))

    # g = "partners(id, id)"
    # g = mln.logic.grammar.parse_predicate(g)
    # print(g)

    # print(mln.predicates)
    mln = MLN(mlnfile='smokers.mln', grammar='GSMLNGrammar')
    # mln.write()
    # print(mln.predicates)
    dbs = Database.load(mln, dbfiles='smokers.db')
    # dbs[0].write()
    # print(mln.formulas[0].neural)
    # print(mln.formulas[0].cnf())
    # print(mln.nnformulas[0].idx)
    # print(mln.domains)
    # print(dbs[0].domains)
    # mln.formulas[1].print_structure()

    # mrf = mln.ground(dbs[0])
    # grounder = DefaultGroundingFactory(mrf, simplify=False, unsatfailure=True, verbose=False, cache=0)
    # for f in grounder.itergroundings():
    #     print(f)

    # print((mrf.gndatoms))
    # mln = MLN(grammar='GSMLNGrammar')
    # mln << 'Cancer(&person)'
    # mln << 'Friends(&person,&person)'
    # mln << 'Smokes(&person)'

    # f = 'Smokes($x) => Cancer($x)'
    # g = 'Friends($x,$y) => (Smokes($x) <=> Smokes($y))'
    # print(mln.logic.grammar.parse_formula(f))
    # mln.formula(f)
    # mln.formula(g)
    # print(mln.predicates)
    # print(mln.formulas)
    # mln.formulas[0].print_structure()
    # print(mln.domains)
    # print(mln.formulas[0].cnf())

    # this uses the method from base.py
    learned_mln = mln.learn(databases=dbs, method=GSMLN_L, verbose=True)
Example #4
def test_learning_smokers():
    p = os.path.join(locs.examples, 'smokers', 'smokers.pracmln')
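    # locs.examples resolves to the examples directory bundled with pracmln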
    mln = MLN(mlnfile=('%s:smoking.mln' % p), grammar='StandardGrammar')
    mln.write()
    db = Database(mln, dbfile='%s:smoking-train.db' % p)
    for method in ('BPLL', 'BPLL_CG', 'CLL'):
        for multicore in (True, False):
            print('=== LEARNING TEST:', method, '===')
            learn(method=method,
                  mln=mln,
                  db=db,
                  verbose=True,
                  multicore=multicore).run()
Example #5
def test_learning_taxonomies():
    p = os.path.join(locs.examples, 'taxonomies', 'taxonomies.pracmln')
    mln = MLN(mlnfile=('%s:senses_and_roles.mln' % p), grammar='PRACGrammar')
    mln.write()
    dbs = Database.load(mln, dbfiles='%s:training.db' % p)
    for method in ('DPLL', 'DBPLL_CG', 'DCLL'):
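        # DPLL, DBPLL_CG and DCLL are discriminative learners, hence the epreds/discr_preds arguments below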
        for multicore in (True, False):
            print('=== LEARNING TEST:', method, '===')
            learn(method=method,
                  mln=mln,
                  db=dbs,
                  verbose=True,
                  multicore=multicore,
                  epreds='is_a',
                  discr_preds=EVIDENCE_PREDS).run()
Example #6
def main(arg='.'):  #(arg='..'):
    #if (len(sys))
    pth = os.path.join(arg, 'wts.pybpll.smoking-train-smoking.mln')
    #pth = os.path.join(arg, 'learnt.cll.scenes-new-object-detection.mln')
    mln = MLN(mlnfile=pth, grammar='StandardGrammar')
    #mln = MLN(mlnfile=pth, grammar='PRACGrammar')
    pth = os.path.join(arg, 'smoking-test.db')
    #pth = os.path.join(arg, 'test.db')
    db = Database(mln, dbfile=pth)
    #with open(os.path.join(arg, 'man_performance.txt'), 'a') as fl:

    #start = time.time()
    #t = 1000* timeit.timeit("test(False)", setup="from __main__ import test, set; set()", number=10)

    #MULTICORE:#
    #query(queries='Cancer,Smokes,Friends', method='EnumerationAsk', mln=mln, db=db, verbose=False, multicore=True).run()

    #t1 = time.time()-start
    #print('single core exact inference test: {}'.format(t))
    #fl.write(str(t))
    #fl.write('\t(SingleCore)\n')
    start = time.time()
    #t = 1000* timeit.timeit("test(True)", setup="from __main__ import test, set; set()", number=10)
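    # EnumerationAsk computes exact marginals by enumerating possible worlds; tractable only for small domains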
    query(method='EnumerationAsk',
          mln=mln,
          db=db,
          verbose=False,
          multicore=False).run()
    t2 = time.time() - start
    print('exact inference test: {}'.format(t2))
Example #7
 def learning(self):
     mln = MLN(mlnfile=self.mln_path, grammar='StandardGrammar')
     db = Database(mln, dbfile=self.db_path)
     for method in ('BPLL', 'BPLL_CG', 'CLL'):
         print('=== LEARNING TEST:', method, '===')
         learn(method=method, mln=mln, db=db, verbose=True,
               multicore=False).run()
Example #8
    def initialize(self):
        logger.debug('initializing nl_parsing')

        self.mln = MLN(mlnfile=os.path.join(self.module_path, 'mln',
                                            'predicates.mln'),
                       grammar='PRACGrammar',
                       logic='FuzzyLogic')
Example #9
def main(arg='.'):
    #if (len(sys))
    pth = os.path.join(arg, 'wts.pybpll.smoking-train-smoking.mln')
    mln = MLN(mlnfile=pth, grammar='StandardGrammar')
    pth = os.path.join(arg, 'smoking-test-smaller.db')
    db = Database(mln, dbfile=pth)
    #with open(os.path.join(arg, 'man_performance.txt'), 'a') as fl:

    start = time.time()
    #t = 1000* timeit.timeit("test(False)", setup="from __main__ import test, set; set()", number=10)
    query(queries='Cancer,Smokes,Friends',
          method='MC-SAT',
          mln=mln,
          db=db,
          verbose=False,
          multicore=True).run()
    t1 = time.time() - start
    #print('single core exact inference test: {}'.format(t))
    #fl.write(str(t))
    #fl.write('\t(SingleCore)\n')
    start = time.time()
    #t = 1000* timeit.timeit("test(True)", setup="from __main__ import test, set; set()", number=10)
    query(queries='Cancer,Smokes,Friends',
          method='MC-SAT',
          mln=mln,
          db=db,
          verbose=False,
          multicore=False).run()
    t2 = time.time() - start
    #print('multi core exact inference test: {}'.format(t))
    print('Inference, MC-SAT, {}, {}'.format(t1, t2))
Example #10
 def construct_global_mln(self):
     '''
     Reads all predicate declaration MLNs of all modules and returns an MLN
     with all predicates declared.
     '''
     mln = MLN(logic='FuzzyLogic', grammar='PRACGrammar')
     for name, manifest in list(self._manifests_by_name.items()):
         module_path = manifest.module_path
         decl_mlns = manifest.pred_decls
         for mlnfile in decl_mlns:
             tmpmln = MLN(mlnfile=os.path.join(praclocations.pracmodules,
                                               module_path, 'mln', mlnfile),
                          logic='FuzzyLogic',
                          grammar='PRACGrammar')
             mln.update_predicates(tmpmln)
     return mln
Example #11
def test_mln():
    mln = MLN()
    mln << 'foo(x)' # predicate declaration
    mln << 'bar(y)' # another pred declaration
    mln << 'bar(?x) => bar(?y).' # hard logical constraint
    mln << 'logx(.75)/log(.25) foo(?x)' # weighted formula
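    # a trailing '.' marks a hard constraint; a leading weight expression makes a formula soft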
    print('mln write:')
    mln.write()
    print('mln predicates:')
    for pred in mln.predicates:
        print(repr(pred))
    print('mln formulas:')
    for f in mln.formulas:
        print(f)
        f.print_structure()  
    return mln 
Example #12
def main(args, options):
    #===========================================================================
    # Load the NL parsing MLN
    #===========================================================================
    mln = MLN(mlnfile=os.path.join(prac.locations.pracmodules, 'nl_parsing',
                                   'mln', 'predicates.mln'),
              grammar='PRACGrammar',
              logic='FuzzyLogic')

    #===========================================================================
    # Load the Java VM
    #===========================================================================
    if not java.isJvmRunning():
        java.initJvm()
    if not jpype.isThreadAttachedToJVM():
        jpype.attachThreadToJVM()

    #===========================================================================
    # # suppress the stderr outputs from the parser
    #===========================================================================
    jpype.java.lang.System.setErr(jpype.java.io.PrintStream(os.devnull))

    #===========================================================================
    # Initialize the parser
    #===========================================================================
    stanford_parser = StanfordParser(grammar_path)
    dbs = []
    sentences = args
    for s in sentences:
        db = ''
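        # collect evidence atoms as plain text; the per-sentence databases are joined with '---' below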
        deps = stanford_parser.get_dependencies(json.loads(s), True)
        deps = map(str, deps)
        words = set()
        for d in deps:
            # replace ':' with '_' in Stanford predicate names
            res = re.match(r'(!?)(.+)\((.+)\)$', d)
            if res:
                d = '{}{}({})'.format(res.group(1),
                                      res.group(2).replace(':', '_'),
                                      res.group(3))
            _, pred, atomargs = mln.logic.parse_literal(str(d))
            words.update(atomargs)
            db += '{}({})\n'.format(pred, ', '.join(atomargs))
        postags = stanford_parser.get_pos()
        for postag in list(postags.values()):  # copy: the dict is updated below
            if postag[0] not in words:
                continue
            postagatom = 'has_pos({},{})'.format(postag[0], postag[1])
            db += '{}\n'.format(postagatom)
            postags[postag[0]] = postag[1]
        dbs.append(db)
    result = '---\n'.join(dbs)
    if options.outfile is not None:
        with open(options.outfile, 'w+') as f:
            f.write(result)
    else:
        print(result)
Example #13
    def train(self, praclearning):

        print(prac_heading('Training knowledgebase'))

        mlnName = praclearning.otherParams.get('mln', None)
        mlnLogic = praclearning.otherParams.get('logic', None)
        objName = praclearning.otherParams.get('concept', None)
        onTheFly = praclearning.otherParams.get('onthefly', False)

        mln = MLN(mlnfile=os.path.abspath(mlnName), logic=mlnLogic,
                  grammar='PRACGrammar')

        pracTrainingDBS = praclearning.training_dbs
        trainingDBS = []

        if len(pracTrainingDBS) >= 1 and type(
                pracTrainingDBS[0]) is str:  # db from file
            logger.info('Learning from db files...')
            inputdbs = Database.load(mln, dbfile=pracTrainingDBS,
                                     ignore_unknown_preds=True)
            trainingDBS += inputdbs
        elif len(pracTrainingDBS) > 1:
            logger.info('Learning from db files (xfold)...')
            trainingDBS = pracTrainingDBS
        else:  # db from inference result
            logger.info('Learning from inference result...')
            inputdbs = pracTrainingDBS
            for db in inputdbs:
                db << 'object(cluster, {})'.format(objName)
                trainingDBS.append(db)

        outputfile = '{}_trained.mln'.format(mlnName.split('.')[0])

        # learning mln
        trainedMLN = mln.learnWeights(trainingDBS, LearningMethods.DCLL,
                                      evidencePreds=possibleProps, partSize=1,
                                      gaussianPriorSigma=10, useMultiCPU=0,
                                      optimizer='cg', learningRate=0.9)

        print(prac_heading('Learnt Formulas'))

        trainedMLN.printFormulas()
        trainedMLN.write(open(outputfile, "w"))

        return trainedMLN
Example #14
    def model_config(self, predicate, formula, database, mln_path, db_path):
        """
        Returns the database and mln objects in MLN format
        --Inputs--
        predicate: predicate object with parsed predicates
        formula: formula object with parsed predicates
        database:.txt file containing the database(s)
        mln_path: .mln file name to save the learned weights per formula
        db_path: .db file to save the progress of the database learning
        """
        base_path = os.getcwd()
        mln = MLN(grammar='PRACGrammar', logic='FirstOrderLogic')  # PRACGrammar since we are using clusters
        for i in predicate:
            mln << i
            print('input predicate successful:'+i)
        for i in formula:
            mln << i
            print('input formula successful :'+i)

        mln.write()
        mln.tofile(base_path + '/'+ mln_path)

        db = Database.load(mln,database)
        #db.write()
        #db.tofile(base_path + '/'+ db_path)
        return (db,mln)
Example #15
    def model_config(predicate, formula, database, mln_path,
                     db_path):  # mln_path and db_path are strings
        base_path = os.getcwd()
        mln = MLN(grammar='StandardGrammar', logic='FirstOrderLogic')
        for i in predicate:
            mln << i
            print('input predicate successful:' + i)
        for i in formula:
            mln << i
            print('input formula successful :' + i)
        mln.write()
        mln.tofile(base_path + '\\' + mln_path)  # save the predicate declarations as the mln_path .mln file
        db = Database(mln)
        for name, evidence in database:
            try:
                db << evidence
                print('input database successful : ' + name + ' : ' + evidence)
            except Exception:
                # skip entries the parser rejects and keep loading the rest
                continue

        db.write()
        db.tofile(base_path + '\\' + db_path)  # save the evidence data as the db_path .db file
        return (db, mln)
Example #16
def main(arg='.'):
    pth = os.path.join(arg, 'wts.pybpll.smoking-train-smoking.mln')
    mln = MLN(mlnfile=pth, grammar='StandardGrammar')
    pth = os.path.join(arg, 'smoking-test-tiny.db')
    db = Database(mln, dbfile=pth)
    start = time.time()
    query(method='EnumerationAsk',
          mln=mln,
          db=db,
          verbose=False,
          multicore=False).run()
    t2 = time.time() - start
    print('exact inference test: {}'.format(t2))
Example #17
def test_inference_taxonomies():
    p = os.path.join(locs.examples, 'taxonomies', 'taxonomies.pracmln')
    mln = MLN(mlnfile=('%s:wts.learned.taxonomy.mln' % p),
              grammar='PRACGrammar',
              logic='FuzzyLogic')
    db = Database(mln, dbfile='%s:evidence.db' % p)
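    # cw=True imposes the closed-world assumption on the evidence predicates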
    for method in ('EnumerationAsk', 'WCSPInference'):
        print('=== INFERENCE TEST:', method, '===')
        query(queries='has_sense, action_role',
              method=method,
              mln=mln,
              db=db,
              verbose=False,
              cw=True).run().write()
Example #18
def test_inference_smokers():
    p = os.path.join(locs.examples, 'smokers', 'smokers.pracmln')
    mln = MLN(mlnfile=('%s:wts.pybpll.smoking-train-smoking.mln' % p),
              grammar='StandardGrammar')
    db = Database(mln, dbfile='%s:smoking-test-smaller.db' % p)
    for method in ('EnumerationAsk', 'MC-SAT', 'WCSPInference',
                   'GibbsSampler'):
        for multicore in (False, True):
            print('=== INFERENCE TEST:', method, '===')
            query(queries='Cancer,Smokes,Friends',
                  method=method,
                  mln=mln,
                  db=db,
                  verbose=True,
                  multicore=multicore).run()
Example #19
def test_inference_smokers():
    p = os.path.join(locs.examples, 'smokers', 'smokers')
    # mln = MLN(mlnfile=('%s:wts.pybpll.smoking-train-smoking.mln' % p),
    #           grammar='StandardGrammar')
    # db = Database(mln, dbfile='%s:smoking-test-smaller.db' % p)
    print(p)
    mln = MLN(mlnfile=('%s.mln' % p), grammar='StandardGrammar')
    db = Database(mln, dbfile='%s.db' % p)
    for method in ('GibbsSampler', ):
        print('=== INFERENCE TEST:', method, '===')
        query(queries='Cancer,Smokes,Friends',
              method=method,
              mln=mln,
              db=db,
              verbose=True,
              multicore=False).run()
Example #20
def main(arg='.'):  #(arg='..'):
    #if (len(sys))
    pth = os.path.join(arg, 'wts.pybpll.smoking-train-smoking.mln')
    #pth = os.path.join(arg, 'learnt.cll.scenes-new-object-detection.mln')
    mln = MLN(mlnfile=pth, grammar='StandardGrammar')
    #mln = MLN(mlnfile=pth, grammar='PRACGrammar')
    pth = os.path.join(arg, 'smoking-test-small.db')
    #pth = os.path.join(arg, 'test.db')
    db = Database(mln, dbfile=pth)
    start = time.time()
    query(method='EnumerationAsk',
          mln=mln,
          db=db,
          verbose=False,
          multicore=False).run()
    t2 = time.time() - start
    print('exact inference test: {}'.format(t2))
Example #21
def test_learning_smokers(arg='.', disp=False):
    pth = os.path.join(arg, 'smoking.mln')
    mln = MLN(mlnfile=pth, grammar='StandardGrammar')
    pth = os.path.join(arg, 'smoking-train.db')
    db = Database(mln, dbfile=pth)
    for method in ('BPLL', 'BPLL_CG', 'CLL'):
        for multicore in (True, False):
            print('=== LEARNING TEST:', method, '===')
            if disp:
                # learning methods cannot be passed to query(); display the learnt MLN instead
                learn(method=method,
                      mln=mln,
                      db=db,
                      verbose=disp,
                      multicore=multicore).run().write()
            else:
                learn(method=method,
                      mln=mln,
                      db=db,
                      verbose=disp,
                      multicore=multicore).run()
Example #22
 def inference(self, inference_query):
     mln = MLN(mlnfile=self.mln_path, grammar='StandardGrammar')
     db = Database(mln, dbfile=self.db_path)
     for method in [
             'EnumerationAsk'
             # 'MC-SAT',
             # 'WCSPInference',
             # 'GibbsSampler'
     ]:
         print('=== INFERENCE TEST:', method, '===')
         result = query(queries=inference_query,
                        method=method,
                        mln=mln,
                        db=db,
                        verbose=True,
                        multicore=False).run()
         print(result)
     if result:
         return result
     else:
         return []
Example #23
def test_inference_smokers(arg='.', disp=False):
    pth = os.path.join(arg, 'wts.pybpll.smoking-train-smoking.mln')
    mln = MLN(mlnfile=pth, grammar='StandardGrammar')
    pth = os.path.join(arg, 'smoking-test-smaller.db')
    db = Database(mln, dbfile=pth)
    for method in ('EnumerationAsk', 'MC-SAT', 'WCSPInference',
                   'GibbsSampler'):
        for multicore in (False, True):
            print('=== INFERENCE TEST:', method, '===')
            if disp:
                query(queries='Cancer,Smokes,Friends',
                      method=method,
                      mln=mln,
                      db=db,
                      verbose=disp,
                      multicore=multicore).run().write()
            else:
                query(queries='Cancer,Smokes,Friends',
                      method=method,
                      mln=mln,
                      db=db,
                      verbose=disp,
                      multicore=multicore).run()
Example #24
def main():
    path = os.path.join('Vessel Traffic Data sub-areas April 2020',
                        'cts_sub-areas_04_2020_pt',
                        'cts_bass_st_04_2020_pt.shp')
    df = gpd.read_file(path)
    mln = MLN()
    populatePredicates(mln)
    populateFormulas(mln)
    mln.write()
    dbs = []
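    # build 100 training databases, each generated from three randomly sampled vessel positions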
    for i in range(100):
        collection = df.sample(n=3)
        lons = collection['LON'].tolist()
        lats = collection['LAT'].tolist()
        points = list(zip(lons, lats))
        evidence = ThreePointEvidence(*points)
        db = Database(mln)
        evidence.generateEvidence(db)
        dbs.append(db)
    mln.learn(dbs)
Example #25
def test_reasoning():
    mln = MLN.load(files='./mln/alarm.mln')
    db = Database.load(mln, './mln/alarm.db')
    result = MLNQuery(mln=mln, db=db).run()
    result.write()
    parser.add_argument("-q", "--query", help="query MLN", action="store_true")
    args = parser.parse_args()

    if args.learn:
        print('you chose learn the weights for the mln')
        predicate = s.read_predicate('predicate.txt')
        formula = s.read_formula('formula.txt',predicate)
        data,mln = s.model_config(predicate,formula,'data.txt','results.mln','results.db')
        with open('base.mln', 'wb') as base_mln_file:
             pickle.dump(mln, base_mln_file)

        output = s.activate_model(data,mln)
        output.tofile(os.getcwd() + '/' + 'learnt_mln.mln')
    elif args.query:
        print('you chose to query the mln')
        mln = MLN.load(files='learnt_mln.mln')
        infer_world = Database.load(mln,'inference_data.txt')
        s.inference('query.txt',infer_world,mln)
    else:
        print('please input learn (-l) or query (-q) to proceed')


#query(queries='Cancer(x)', method='MC-SAT', mln=mln, db=data, verbose=False, multicore=True).run().results

# =============================================================================
#         predicate_list = [(x,x.lower()) for x in predicate_list]
#         predicate = [x.replace(' ','').lower() for x in predicate if x !='']
Example #27
from pracmln import MLN
from pracmln import Database
from pracmln import MLNQuery

mln = MLN(mlnfile='./data/smokers/mlns/smoking_trained.mln', grammar='PRACGrammar', logic='FirstOrderLogic')
mln.write()

db = Database.load(mln, './data/smokers/dbs/smoking-test.db')[0]
db.write()

print("Running Query...")
result = MLNQuery(mln=mln, db=db).run()
print(result)
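
A small, optional sketch (not from the original source): the object returned by MLNQuery(...).run() exposes the computed marginals through its .results attribute, which the commented-out query at the end of Example #26 also references; assuming .results behaves as a mapping from ground query atoms to probabilities, the marginals can be consumed programmatically:

# hypothetical follow-up to the query above
for atom, prob in result.results.items():
    print('{} -> {:.4f}'.format(atom, prob))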
Example #28
 parser.add_argument("--output_mln",
                     type=str,
                     help="(.mln)",
                     nargs="?",
                     default="./models/initial.mln")
 args = parser.parse_args()
 # loads the data for MLN
 roles = utils.load_roles(args.roles_file)
 instances = []
 for dataset in args.input_datasets:
     instances += utils.load_flattened_data(dataset)
 role_constraints = utils.get_role_constraints(roles, instances)
 formulas = get_formulas(args.formula_file)
 domains = get_domains(roles, instances)
 # generates the markov logic network
 mln = MLN(logic="FirstOrderLogic", grammar="PRACGrammar")
 for domain, values in domains.items():  # domains
     for value in values:
         if len(value) > 1:
             const = ''.join(value)
         elif len(value) > 0:
             const = value[0]
         else:
             const = "None"
         mln.update_domain({domain: [const]})
 for role in roles.keys():  # predicates
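     # the '!' suffix declares the argument functional: exactly one true value per grounding of the rest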
     mln.predicate(Predicate(
         role, [role + "_d!"]))  # hard-functional constraints only
 for formula in formulas:  # formulas
     formula_str = "0.0 "
     for idx in range(len(formula)):
Example #29
    def learn(self, savegeometry=True, options=None, *_):
        if options is None:
            options = {}
        mln_content = self.mln_container.editor.get("1.0", END).strip()
        db_content = self.db_container.editor.get("1.0", END).strip()

        # create conf from current gui settings
        self.update_config()

        # write gui settings
        self.write_gconfig(savegeometry=savegeometry)

        # hide gui
        self.master.withdraw()

        try:
            print((headline('PRAC LEARNING TOOL')))
            print()

            if options.get('mlnarg') is not None:
                mlnobj = MLN(mlnfile=os.path.abspath(options.get('mlnarg')),
                             logic=self.config.get('logic', 'FirstOrderLogic'),
                             grammar=self.config.get('grammar', 'PRACGrammar'))
            else:
                mlnobj = parse_mln(mln_content, searchpaths=[self.project_dir],
                                   projectpath=os.path.join(self.project_dir, self.project.name),
                                   logic=self.config.get('logic', 'FirstOrderLogic'),
                                   grammar=self.config.get('grammar', 'PRACGrammar'))

            if options.get('dbarg') is not None:
                dbobj = Database.load(mlnobj, dbfiles=[options.get('dbarg')], ignore_unknown_preds=self.config.get('ignore_unknown_preds', True))
            else:
                if self.config.get('pattern'):
                    local, dblist = self.get_training_db_paths(self.config.get('pattern').strip())
                    dbobj = []
                    # build database list from project dbs
                    if local:
                        for dbname in dblist:
                            dbobj.extend(parse_db(mlnobj, self.project.dbs[dbname].strip(),
                                         ignore_unknown_preds=self.config.get('ignore_unknown_preds', True),
                                         projectpath=os.path.join(self.dir, self.project.name)))
                        out(dbobj)
                    # build database list from filesystem dbs
                    else:
                        for dbpath in dblist:
                            dbobj.extend(Database.load(mlnobj, dbpath, ignore_unknown_preds= self.config.get('ignore_unknown_preds', True)))
                # build single db from currently selected db
                else:
                    dbobj = parse_db(mlnobj, db_content, projectpath=os.path.join(self.dir, self.project.name), dirs=[self.dir])

            learning = MLNLearn(config=self.config, mln=mlnobj, db=dbobj)
            result = learning.run()

            # write to file if run from commandline, otherwise save result
            # to project results
            if options.get('outputfile') is not None:
                output = io.StringIO()
                result.write(output)
                with open(os.path.abspath(options.get('outputfile')), 'w') as f:
                    f.write(output.getvalue())
                logger.info('saved result to {}'.format(os.path.abspath(options.get('outputfile'))))
            elif self.save.get():
                output = io.StringIO()
                result.write(output)
                self.project.add_mln(self.output_filename.get(), output.getvalue())
                self.mln_container.update_file_choices()
                self.project.save(dirpath=self.project_dir)
                logger.info('saved result to file mln/{} in project {}'.format(self.output_filename.get(), self.project.name))
            else:
                logger.debug("No output file given - results have not been saved.")
        except:
            traceback.print_exc()

        # restore gui
        sys.stdout.flush()
        self.master.deiconify()
Example #30
                     help="(.db)",
                     nargs="?",
                     default="./data/train.db")
 parser.add_argument("--output_mln",
                     type=str,
                     help="models",
                     nargs="?",
                     default="./models/class_learned.mln")
 parser.add_argument("--shuffle",
                     type=bool,
                     help="1 or 0",
                     nargs="?",
                     default="True")
 args = parser.parse_args()
 # loads the initial MLN and DBs
 mln = MLN.load(args.input_mln)
 dbs = Database.load(mln, args.input_database)
 if args.shuffle:
     shuffle(dbs)
 # runs the learning on the markov logic network to get weights
 start = time()
 learned_mln = MLNLearn(mln=mln,
                        db=dbs,
                        verbose=True,
                        method="BPLL_CG",
                        use_prior=True,
                        multicore=True).run()
 learned_mln.tofile(args.output_mln)
 duration = int((time() - start) / 60.0)
 with open(
         "./results/" + args.output_mln.split("/")[2].split(".")[0] +
Example #31
 def initialize(self):
     self.mln = MLN(mlnfile=os.path.join(self.module_path, 'mln',
                                         'predicates.mln'),
                    logic='FuzzyLogic',
                    grammar='PRACGrammar')
     self.wordnetKBs = {}
Example #32
                valmap = dict([(val, computeClosestCluster(val, self.clusters[domain])[1][0]) for val in newdb.domains[domain]])
                newdb.domains[domain] = list(valmap.values())
                # replace the affected evidences
                for ev in list(newdb.evidence.keys()):  # copy keys: retract() mutates the dict
                    truth = newdb.evidence[ev]
                    _, pred, params = db.mln.logic.parse_literal(ev)
                    if domain in self.mln.predicate(pred).argdoms:  # domain is affected by the mapping
                        newdb.retract(ev)
                        newargs = [v if domain != self.mln.predicate(pred).argdoms[i] else valmap[v] for i, v in enumerate(params)]
                        atom = '%s%s(%s)' % ('' if truth else '!', pred, ','.join(newargs))
                        newdb << atom
            newdbs.append(newdb)
        return newdbs

            
if __name__ == '__main__':
    mln = MLN.load('/home/nyga/code/pracmln/examples/object-recognition/object-recognition.pracmln:object-detection.mln')
    dbs = Database.load(mln, '/home/nyga/code/pracmln/examples/object-recognition/object-recognition.pracmln:scenes-new.db')

    # do some plain clustering on texts
    s = ['otto', 'otte', 'obama', 'markov logic network', 'markov logic', 'otta', 'markov random field']
    s = set([val for db in Database.load(mln, '/home/nyga/code/pracmln/examples/object-recognition/object-recognition.pracmln:scenes-new.db') for val in db.domains['text']])
    clusters = SAHN(s)
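    # SAHN: sequential agglomerative hierarchical non-overlapping clustering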
    for c in clusters:
        print(c)

    # apply clustering to a set of databases
    cluster = NoisyStringClustering(mln, ['text'])
    cluster.materialize(dbs)