Example #1
def generate_databases(mln, instances):
    """Builds one pracmln evidence Database per instance, asserting each atom as true."""
    dbs = []
    for atoms in instances:
        db = Database(mln)
        for atom in atoms:
            db[atom] = 1.0  # asserts the ground atom with truth value 1.0
        dbs.append(db)
    return dbs
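
A minimal usage sketch for generate_databases, assuming pracmln is installed; the predicate and constants below are placeholders, not names from the original project:

from pracmln import MLN, Database

mln = MLN()
mln << "IsObject(object)"  # declares a placeholder predicate over one domain
instances = [["IsObject(Cup)"], ["IsObject(Bowl)", "IsObject(Plate)"]]
dbs = generate_databases(mln, instances)
print(len(dbs))  # 2 -- one evidence database per instance
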
Example #2
                    nargs="?",
                    default="./data/train.db")
parser.add_argument("--output_mln",
                    type=str,
                    help="models",
                    nargs="?",
                    default="./models/class_learned.mln")
# an int flag (1 or 0) avoids the argparse type=bool pitfall, where any
# non-empty string (including "0" or "False") parses as True
parser.add_argument("--shuffle",
                    type=int,
                    help="1 or 0",
                    nargs="?",
                    default=1)
args = parser.parse_args()
# loads the initial MLN and DBs
mln = MLN.load(args.input_mln)
dbs = Database.load(mln, args.input_database)
if args.shuffle:
    shuffle(dbs)
# runs weight learning on the Markov logic network
start = time()
learned_mln = MLNLearn(mln=mln,
                       db=dbs,
                       verbose=True,
                       method="BPLL_CG",
                       use_prior=True,
                       multicore=True).run()
learned_mln.tofile(args.output_mln)
duration = int((time() - start) / 60.0)
# records the training time (in minutes) alongside the learned model's name
with open(
        "./results/" + args.output_mln.split("/")[2].split(".")[0] +
        "_traintime.txt", "w") as f:
    f.write(str(duration))
Example #3
 parser.add_argument("--negative_dataset",
                     type=str,
                     help="(.txt)",
                     nargs="?",
                     default="./data/val_data_negative.txt")
 parser.add_argument("--roles_file",
                     type=str,
                     help="(.txt)",
                     nargs="?",
                     default="./data/role_to_values.txt")
 args = parser.parse_args()
 # loads the MLN, DBs, and instances
 with open(args.roles_file, "r") as f:
     roles = eval(f.readlines()[0])
 mln = MLN.load(args.input_mln)
 dbs = Database.load(mln, args.positive_database)
 p_examples = utils.load_flattened_data(args.positive_dataset)
 n_examples = utils.load_flattened_data(args.negative_dataset)
 test_times = []
 # begins testing roles
 for role in roles.keys():
     start = time()
     # creates testing DBs with labels
     test_dbs = generate_test_dbs(role, dbs)
     # gets MLN scores
     scores = score_mln(mln, role, test_dbs)
     # makes instance-score datastructure
     instance_scores = scores2instance_scores(role, roles, p_examples,
                                              n_examples, scores)
     # gets metrics for the role
    utils.compute_metric_scores(p_examples, n_examples, instance_scores)
    test_times.append(time() - start)  # records per-role testing time
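
The metric helper itself is project-specific; purely as an assumption about its role, a rough stand-in could be written with scikit-learn, treating instance_scores as a dict from instance to MLN score:

from sklearn.metrics import average_precision_score

# Hypothetical stand-in for utils.compute_metric_scores (an assumption,
# not the project's helper): positives labeled 1, negatives 0, ranked by
# their MLN scores.
def compute_metric_scores(p_examples, n_examples, instance_scores):
    labels = [1] * len(p_examples) + [0] * len(n_examples)
    scores = [instance_scores[inst] for inst in p_examples + n_examples]
    return average_precision_score(labels, scores)
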
Example #4
                    ["IsObject(?x)", "IsObject(?y)"]]
 }
 for rel in r2i.keys():
     mln.predicate(Predicate(rel, rel2mln[rel][0]))
 mln.predicate(Predicate("IsRoom", ["rooms"]))
 mln.predicate(Predicate("IsLocation", ["locations"]))
 mln.predicate(Predicate("IsObject", ["objects"]))
 mln.predicate(Predicate("IsAction", ["actions"]))
 mln.predicate(Predicate("IsState", ["states"]))
 # declares the markov logic formulas in the markov logic network
 for pred in mln.iterpreds():
     if "Is" not in pred.name:
         mln << "0.0 " + rel2mln[pred.name][1][0] + " ^ " + rel2mln[
             pred.name][1][1] + " ^ " + pred.name + "(?x, ?y)"
 # loads the 'evidence' to learn markov logic network weights
 db = Database(mln)
 for ent in e2i.keys():
     ent_type = ent.split("-")[-1]
     if ent_type == "r":
         db << "IsRoom(" + ent + ")"
     elif ent_type == "l":
         db << "IsLocation(" + ent + ")"
     elif ent_type == "o":
         db << "IsObject(" + ent + ")"
     elif ent_type == "a":
         db << "IsAction(" + ent + ")"
     elif ent_type == "s":
         db << "IsState(" + ent + ")"
     else:
         print("Error: Unknown entity type for evidence!")
         exit()
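
The suffix dispatch above can also be collapsed into a lookup table; a behavior-equivalent sketch of the same evidence loading:

# Table-driven version of the entity-type dispatch above (same behavior).
TYPE_TO_PRED = {"r": "IsRoom", "l": "IsLocation", "o": "IsObject",
                "a": "IsAction", "s": "IsState"}
for ent in e2i.keys():
    ent_type = ent.split("-")[-1]
    if ent_type not in TYPE_TO_PRED:
        print("Error: Unknown entity type for evidence!")
        exit()
    db << TYPE_TO_PRED[ent_type] + "(" + ent + ")"
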
Example #5
            instance.append((role, values))
    return mlninstances


if __name__ == "__main__":
    parser = ArgumentParser(description="Role-Value Dataset 2 MLN Database")
    parser.add_argument("--input_mln", type=str, help="(.mln)", nargs="?",
                        default="./models/class_initial.mln")
    parser.add_argument("--input_datasets", type=str, help="(.txt)", nargs="*",
                        default=["./data/train_data.txt"])
    parser.add_argument("--output_database", type=str, help="(.db)", nargs="?",
                        default="./data/train.db")
    parser.add_argument("--roles_file", type=str, help="(.txt)", nargs="?",
                        default="./data/role_to_values.txt")
    args = parser.parse_args()
    # loads the initial MLN
    mln = MLN.load(args.input_mln)
    # loads data for DBs
    atoms = []
    roles = utils.load_roles(args.roles_file)
    for input_dataset in args.input_datasets:
        rv = utils.load_flattened_data(input_dataset)
        rv = rvs2mlnrvs(mln, roles, rv)
        # formats role-value to atoms
        atoms += utils.format_instances_rv2atoms(rv)
    # generates the DBs and saves
    dbs = generate_databases(mln, atoms)
    with open(args.output_database, "w") as f:
        Database.write_dbs(dbs, f)
    print("The database for the MLN is in " + args.output_database + ".")