# Example 1
                     help="(.db)",
                     nargs="?",
                     default="./data/train.db")
 # NOTE(review): fragment — the ArgumentParser construction and the
 # --input_mln/--input_database options used below are above the visible region.
 parser.add_argument("--output_mln",
                     type=str,
                     help="models",
                     nargs="?",
                     default="./models/class_learned.mln")
 # NOTE(review): argparse does not parse booleans — type=bool makes any
 # non-empty string (including "0" or "False") truthy, and the default here is
 # the *string* "True". action="store_true" is the usual fix; confirm callers.
 parser.add_argument("--shuffle",
                     type=bool,
                     help="1 or 0",
                     nargs="?",
                     default="True")
 args = parser.parse_args()
 # loads the initial MLN and DBs
 mln = MLN.load(args.input_mln)
 dbs = Database.load(mln, args.input_database)
 if args.shuffle:
     shuffle(dbs)  # in-place shuffle of the loaded databases (presumably random.shuffle — import not visible)
 # runs the learning on the markov logic network to get weights
 start = time()
 learned_mln = MLNLearn(mln=mln,
                        db=dbs,
                        verbose=True,
                        method="BPLL_CG",  # pseudo-log-likelihood weight learning
                        use_prior=True,
                        multicore=True).run()
 learned_mln.tofile(args.output_mln)
 duration = int((time() - start) / 60.0)  # elapsed whole minutes, truncated
 # NOTE(review): the statement below is truncated in this view — the open(...)
 # call continues past the visible fragment.
 with open(
         "./results/" + args.output_mln.split("/")[2].split(".")[0] +
    # NOTE(review): fragment — 'parser', the helper object 's', and the
    # -l/--learn option are defined above the visible region.
    parser.add_argument("-q", "--query", help="query MLN", action="store_true")
    args = parser.parse_args()

    if args.learn:
        print('you chose learn the weights for the mln')
        # Build the MLN from plain-text predicate/formula declarations.
        predicate = s.read_predicate('predicate.txt')
        formula = s.read_formula('formula.txt',predicate)
        data,mln = s.model_config(predicate,formula,'data.txt','results.mln','results.db')
        # Persist the untrained base MLN for later reuse.
        # NOTE(review): pickle is unsafe on untrusted input — acceptable here
        # only because the file is produced and consumed locally.
        with open('base.mln', 'wb') as base_mln_file:
             pickle.dump(mln, base_mln_file)

        # Run weight learning and write the learned model beside the script.
        output = s.activate_model(data,mln)
        output.tofile(os.getcwd() + '/' + 'learnt_mln.mln')
    elif args.query:
        print('you chose to query the mln')
        # Reload the learned model and run inference over the evidence file.
        mln = MLN.load(files='learnt_mln.mln')
        infer_world = Database.load(mln,'inference_data.txt')
        s.inference('query.txt',infer_world,mln)
    else:
        print ('please input learn (-l) or query (-q) to proceed')


#query(queries='Cancer(x)', method='MC-SAT', mln=mln, db=data, verbose=False, multicore=True).run().results





# =============================================================================
#         predicate_list = [(x,x.lower()) for x in predicate_list]
#         predicate = [x.replace(' ','').lower() for x in predicate if x !='']
# Example 3
def test_reasoning():
    """Smoke-test MLN inference: load the alarm model and its evidence
    database, run a query over them, and print the results."""
    network = MLN.load(files='./mln/alarm.mln')
    evidence = Database.load(network, './mln/alarm.db')
    query_result = MLNQuery(mln=network, db=evidence).run()
    query_result.write()
# Example 4
                # NOTE(review): fragment — the enclosing method and the loops
                # binding 'newdb', 'newdbs', 'db', and 'domain' are above the
                # visible region.
                # Map each value of this domain to the representative of its
                # closest cluster.
                valmap = dict([(val, computeClosestCluster(val, self.clusters[domain])[1][0]) for val in newdb.domains[domain]])
                # NOTE(review): this stores a dict_values *view*, not a list —
                # under Python 3 this likely should be list(valmap.values());
                # confirm what newdb.domains expects.
                newdb.domains[domain] = valmap.values()
                # replace the affected evidences
                # NOTE(review): newdb.retract(ev) mutates the evidence while
                # .keys() is being iterated; if retract deletes entries,
                # Python 3 raises RuntimeError — iterating list(newdb.evidence)
                # would be safe. TODO confirm retract's behavior.
                for ev in newdb.evidence.keys():
                    truth = newdb.evidence[ev]
                    _, pred, params = db.mln.logic.parse_literal(ev)
                    if domain in self.mln.predicate(pred).argdoms:  # domain is affected by the mapping
                        newdb.retract(ev)
                        # Substitute mapped values only in argument slots whose
                        # declared domain is the clustered one.
                        newargs = [v if domain != self.mln.predicate(pred).argdoms[i] else valmap[v] for i, v in enumerate(params)]
                        atom = '%s%s(%s)' % ('' if truth else '!', pred, ','.join(newargs))
                        newdb << atom
            newdbs.append(newdb)
        return newdbs

            
if __name__ == '__main__':
    # Load the object-detection MLN and its scene databases from the
    # object-recognition example project.
    mln = MLN.load('/home/nyga/code/pracmln/examples/object-recognition/object-recognition.pracmln:object-detection.mln')
    dbs = Database.load(mln, '/home/nyga/code/pracmln/examples/object-recognition/object-recognition.pracmln:scenes-new.db')

    # do some plain clustering on texts: gather every value of the 'text'
    # domain from the databases loaded above.
    # (Previously a toy word list was assigned to s and immediately
    # overwritten — dead code — and the same .db file was redundantly
    # re-read from disk; both fixed by reusing dbs.)
    s = {val for db in dbs for val in db.domains['text']}
    clusters = SAHN(s)
    for c in clusters:
        print(c)

    # apply clustering to the 'text' domain of the loaded databases
    cluster = NoisyStringClustering(mln, ['text'])
    cluster.materialize(dbs)

# Example 5
        # NOTE(review): fragment — the enclosing function and the names 'db',
        # 'ent', 'ent_type', 'triples', 'i2e', and 'i2r' are defined above the
        # visible region.
        # Assert the entity's type predicate as evidence, keyed by a
        # single-letter type code.
        if ent_type == "r":
            db << "IsRoom(" + ent + ")"
        elif ent_type == "l":
            db << "IsLocation(" + ent + ")"
        elif ent_type == "o":
            db << "IsObject(" + ent + ")"
        elif ent_type == "a":
            db << "IsAction(" + ent + ")"
        elif ent_type == "s":
            db << "IsState(" + ent + ")"
        else:
            print("Error: Unknown entity type for evidence!")
            exit()
    # Add one ground relational atom r(h, t) per (head, relation, tail) triple.
    for triple_idx in range(triples.shape[0]):
        triple = triples[triple_idx]
        h = i2e[triple[0]]
        r = i2r[triple[1]]
        t = i2e[triple[2]]
        db << r + "(" + h + ", " + t + ")"

    # runs the learning on the markov logic network to get weights
    start_time = time.time()
    # learned_result = MLNLearn(mln=mln, db=db, verbose=True, save=True, method="BPLL_CG", output_filename="r_learned_weights.mln", multicore=True).run()
    # learned_result.tofile("r_learned_weights.mln")
    # NOTE(review): learning is disabled (commented out above); a previously
    # trained model is loaded from disk instead.
    learned_result = MLN.load("learned_weights.mln")
    MLNQuery(queries="OperatesOn(sink-l,carrot-o)",
             verbose=True,
             mln=learned_result,
             db=db).run()
    print(" ---- %s seconds ---- " % (time.time() - start_time))
    # NOTE(review): debugger breakpoint left in — remove before production use.
    pdb.set_trace()