Example #1
0
 def learn(infile, mode, dbfile, startpt=False, rigidPreds=[]):
     mln = MLN(infile)    
     #db = "tinyk%d%s" %  (k, db)
     mln.combineDB(dbfile)
     for predName in rigidPreds:
         mln.setRigidPredicate(predName)
     mln.learnwts(mode, initialWts=startpt)
     prefix = 'wts'
     if mode == 'PLL':
         tag = "pll"
     else:
         tag = mode.lower()
         if mode == 'LL' and not POSSWORLDS_BLOCKING:
             tag += "-nobl"
         if mode == 'LL_fac':
             prefix = 'fac'
     fname = ("%s.py%s.%s" % (prefix, tag, infile[3:]))
     mln.write(file(fname, "w"))
     print "WROTE %s\n\n" % fname
Example #2
0
#must run source env.sh first

import os, json

from tqdm import tqdm
from MLN import *
from pprint import pprint

# Derive the folder name, which identifies the learned weights file.
folder = os.path.relpath('.', '..')

mln_filename = 'wts.%s.mln' % folder
mrf_filename = "test.db"

# Ground-truth labels: maps each patient constant to its query predicate name.
patient_labels = json.load(open('./test_labels.json', 'r'))

mln = MLN(mln_filename)
mrf = mln.groundMRF(mrf_filename)
results = {}

tasks = [("MC-SAT", "PyMLNs")]

# Build one ground-atom query string per patient, e.g. "Pred(Patient1)".
queries = []
for patient, query in patient_labels.iteritems():
    queries.append(str(query + "(" + patient + ")"))

probabilities = mrf.inferMCSAT(queries, verbose=False)

for (patient, query), probability in zip(patient_labels.iteritems(),
                                         probabilities):
    if patient not in results:
Example #3
0
# -*- coding: iso-8859-1 -*-

# This is a very simple example script that illustrates how
# you can use scripting to automate your inference tasks.

from MLN import *

mln = MLN("wts.pybpll.smoking-train-smoking.mln")
mrf = mln.groundMRF("smoking-test-smaller.db")
queries = ["Smokes(Ann)", "Smokes(Bob)", "Smokes(Ann) ^ Smokes(Bob)"]
mrf.inferMCSAT(queries, verbose=False)
for query, prob in mrf.getResultsDict().iteritems():
    print "  %f  %s" % (prob, query)
Example #4
0
    def run(self, **kwargs):
        '''
            required arguments:
                training databases(s): either one of
                    "dbs": list of database filenames (or MLN.Database objects for PyMLNs)
                    "db": database filename
                    "pattern": file mask pattern from which to generate the list of databases
                "mln": an MLN filename (or MLN.MLN object for PyMLNs)
                "method": the learning method name
                "output_filename": the output filename
            
            optional arguments:
                "engine": either "PyMLNs" (default) or one of the Alchemy versions defined in the config
                "initialWts": (true/false)
                "usePrior": (true/false); default: False
                "priorStdDev": (float) standard deviation of prior when usePrior=True
                "addUnitClauses": (true/false) whether to add unit clauses (Alchemy only); default: False
                "params": (string) additional parameters to pass to the learner; for Alchemy: command-line parameters; for PyMLNs: either dictionary string (e.g. "foo=bar, baz=2") or a dictionary object
                ...
        '''
        defaults = {
            "engine": "PyMLNs",
            "usePrior": False,
            "priorStdDev": 10.0,
            "addUnitClauses": False,
            "params": ""
        }
        self.settings = defaults
        self.settings.update(kwargs)

        # determine training databases(s)
        if "dbs" in self.settings:
            dbs = self.settings["dbs"]
        elif "db" in self.settings and self.settings["db"] != "":
            dbs = [self.settings["db"]]
        elif "pattern" in self.settings and self.settings["pattern"] != "":
            dbs = []
            pattern = settings["pattern"]
            dir, mask = os.path.split(os.path.abspath(pattern))
            for fname in os.listdir(dir):
                if fnmatch(fname, mask):
                    dbs.append(os.path.join(dir, fname))
            if len(dbs) == 0:
                raise Exception("The pattern '%s' matches no files" % pattern)
            print "training databases:", ",".join(dbs)
        else:
            raise Exception(
                "No training data given; A training database must be selected or a pattern must be specified"
            )

        # check if other required arguments are set
        missingSettings = set(["mln", "method", "output_filename"
                               ]).difference(set(self.settings.keys()))
        if len(missingSettings) > 0:
            raise Exception("Some required settings are missing: %s" %
                            str(missingSettings))

        params = self.settings["params"]
        method = self.settings["method"]
        discriminative = "discriminative" in method

        if self.settings["engine"] in ("PyMLNs",
                                       "internal"):  # PyMLNs internal engine
            # arguments
            args = {"initialWts": False}
            if type(params) == str:
                params = eval("dict(%s)" % params)
            elif type(params) != dict:
                raise ("Argument 'params' must be string or a dictionary")
            args.update(params)  # add additional parameters
            if discriminative:
                args["queryPreds"] = self.settings["nePreds"].split(",")
            if self.settings["usePrior"]:
                args["gaussianPriorSigma"] = float(
                    self.settings["priorStdDev"])
            # learn weights
            if type(self.settings["mln"]) == str:
                mln = MLN.MLN(self.settings["mln"])
            elif type(self.settings["mln"] == MLN.MLN):
                mln = self.settings["mln"]
            else:
                raise Exception(
                    "Argument 'mln' must be either string or MLN object")
            mln.learnWeights(
                dbs,
                method=MLN.ParameterLearningMeasures.byName(method),
                **args)
            # determine output filename
            fname = self.settings["output_filename"]
            mln.write(file(fname, "w"))
            print "\nWROTE %s\n\n" % fname
            #mln.write(sys.stdout)
        else:  # Alchemy
            if self.settings["engine"] not in self.alchemy_versions:
                raise Exception(
                    "Invalid alchemy version '%s'. Known versions: %s" %
                    (self.settings["engine"], ", ".join(
                        lambda x: '"%s"' % x, self.alchemy_versions.keys())))
            alchemy_version = self.alchemy_versions[self.settings["engine"]]
            if type(alchemy_version) != dict:
                alchemy_version = {"path": str(alchemy_version)}
            # find binary
            path = alchemy_version["path"]
            path2 = os.path.join(path, "bin")
            if os.path.exists(path2):
                path = path2
            alchemyLearn = os.path.join(path, "learnwts")
            if not os.path.exists(alchemyLearn) and not os.path.exists(
                    alchemyLearn + ".exe"):
                error = "Alchemy's learnwts/learnwts.exe binary not found in %s. Please configure Alchemy in python/configMLN.py" % path
                tkMessageBox.showwarning("Error", error)
                raise Exception(error)
            # run Alchemy's learnwts
            method_switches, discriminativeAsGenerative, shortname = self.alchemy_methods[
                method]
            params = [alchemyLearn] + method_switches + [
                "-i", self.settings["mln"], "-o",
                self.settings["output_filename"], "-t", ",".join(dbs)
            ] + shlex.split(params)
            if discriminative:
                params += ["-ne", self.settings["nePreds"]]
            elif discriminativeAsGenerative:
                preds = MLN.getPredicateList(self.settings["mln"])
                params += ["-ne", ",".join(preds)]
            if not self.settings["addUnitClauses"]:
                params.append("-noAddUnitClauses")
            if not self.settings["usePrior"]:
                params.append("-noPrior")
            else:
                if self.settings["priorStdDev"] != "":
                    params += ["-priorStdDev", self.settings["priorStdDev"]]

            command = subprocess.list2cmdline(params)
            print "\n", command, "\n"

            #print "running Alchemy's learnwts..."
            p = subprocess.Popen(params,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE)
            cin, cout = p.stdin, p.stdout
            #cin, cout = os.popen2(command)
            output_text = ""
            while True:
                l = cout.readline()
                if l == "":
                    break
                print l,
                output_text += l

            # add data reported by learnwts and, from the input mln, domain declarations and rules for mutual exclusiveness and exhaustiveness
            if True:
                # read the input file
                f = file(self.settings["mln"], "r")
                text = f.read()
                f.close()
                comment = re.compile(r'//.*?^|/\*.*\*/',
                                     re.DOTALL | re.MULTILINE)
                text = re.sub(comment, '', text)
                merules = []
                domain_decls = []
                for l in text.split("\n"):
                    l = l.strip()
                    # domain decls
                    if "{" in l:
                        domain_decls.append(l)
                    # mutex rules
                    m = re.match(r"\w+\((.*?)\)", l)
                    if m != None and m.group(0) == l and ("!" in m.group(1)):
                        merules.append(m.group(0))
                # read the output file
                f = file(self.settings["output_filename"], "r")
                outfile = f.read()
                f.close()
                # rewrite the output file
                f = file(self.settings["output_filename"], "w")
                # - get report with command line and learnwts output
                if config.learnwts_full_report:
                    report = output_text
                else:
                    report = output_text[output_text.
                                         rfind("Computing counts took"):]
                report = "/*\n%s\n\n%s*/\n" % (command, report)
                # - write
                outfile = outfile.replace("//function declarations",
                                          "\n".join(merules))
                if not config.learnwts_report_bottom: f.write(report + "\n")
                f.write("// domain declarations\n" + "\n".join(domain_decls) +
                        "\n\n")
                f.write(outfile)
                if config.learnwts_report_bottom: f.write("\n\n" + report)
                f.close()
Example #5
0
#Inference: infer mln database results query mode debug
#Learn: learnwts mln database mlnout mode
if __name__ == '__main__':
    #sys.argv = [sys.argv[0], "infer", "smokers.mln", "smokers.db", "smokers.res",'cancer', "debug"]
    #sys.argv = [sys.argv[0], "learnwts", "smokers.mln", "smokers.db", "smokers-out.mln", "BPLL"]
    args = sys.argv[1:]
    print args
    #sys.path.append('/home/pedro/Desktop/pymlns')
    import MLN

    #Reflexoes: Pode-se deixar assim e distribuir o pymlns a mesma... utilizadores podem dizer qual o pymlns que querem, mas isto traz um por defeito (pode-se fazer o mesmo com o alchemy)
    #Se calhar faz mais sentido o python ler o config e nao o java...

    if args[0] == "infer":
        mln = MLN.MLN(args[1])
        evidence = MLN.evidence2conjunction(mln.combineDB(args[2]))
        infargs = {"shortOutput": True, "outFile": file(args[3], "w")}
        if len(args) > 6 and args[6] == "debug":
            infargs.update({"details": True, "verbose": True, "debugLevel": 1})

        queries = args[4].split(";")
        if args[5] == "Exact":  #Mode = Exact, Gibbs, MC-SAT
            mln.inferExact(queries, evidence, **infargs)
        elif args[5] == "Gibbs":
            mln.inferGibbs(queries, evidence, **infargs)
        else:
            mln.inferMCSAT(queries, evidence, **infargs)

        #mln.infer(queries, evidence, **infargs)
        print "Inference done!\n"
Example #6
0
     # continuation of the usage/help text -- the enclosing branch and the
     # beginning of the message lie above this excerpt
     print "              print the ground clauses we obtain when instantiating an MRF with the given database\n"
     print "           printGA <mln file> <db file>"
     print "              print the ground atoms we obtain when instantiating an MRF with the given database\n"
     print "           inferExact <mln file> <domain> <query> <evidence>"
     print "              domain: a dictionary mapping domain names to lists of constants, e.g."
     print "                      \"{'dom1':['const1', 'const2'], 'dom2':['const3']}\""
     print "                      To use just the constants declared in the MLN, use \"{}\""
     print "              query, evidence: ground formulas\n" 
     print "           inferGibbs <mln file> <domain> <query> <evidence>\n"
     print "           topWorlds <mln file> <domain>\n"
     print "           test <test name>"
     print "              run the test with the given name (dev only)\n"
     print "  NOTE: This script exposes but a tiny fraction of the functionality of the MLN class!\n"
     sys.exit(0)
 # dispatch on the command name given as the first argument
 if args[0] == "print":
     # parse the MLN and echo it back
     mln = MLN(args[1])
     mln.write(sys.stdout)
 elif args[0] == 'printGF':
     # ground the MLN with the database and print the ground formulas
     mln = MLN(args[1])
     mln.combineDB(args[2])
     mln.printGroundFormulas()
 elif args[0] == 'printGC':
     # same, but convert to CNF first so clauses are printed
     mln = MLN(args[1])
     mln.combineDB(args[2])
     mln._toCNF()
     mln.printGroundFormulas()
 elif args[0] == 'printGA':
     # ground atoms only; skip formula grounding for speed
     mln = MLN(args[1])
     mln.combineDB(args[2], groundFormulas=False)
     mln.printGroundAtoms()
 elif args[0] == "inferExact":
Example #7
0
    def learn(self):
        """Read the current GUI selections and run weight learning.

        Persists the GUI settings, then learns weights either with the
        internal PyMLNs engine or by invoking Alchemy's learnwts binary;
        the learned MLN is written to the configured output file and the
        GUI is restored (and, for Alchemy, an editor is spawned on the
        result). Errors are printed with a traceback rather than raised.
        """
        try:
            # update settings
            mln = self.selected_mln.get()
            db = self.selected_db.get()
            if "" in (db, mln): return
            method = self.selected_method.get()
            params = self.params.get()
            self.settings["mln"] = mln
            self.settings["db"] = db
            self.settings["output_filename"] = self.output_filename.get()
            self.settings["params%d" % int(self.internalMode)] = params
            self.settings["engine"] = self.selected_engine.get()
            self.settings["method%d" % int(self.internalMode)] = method
            self.settings["geometry"] = self.master.winfo_geometry()
            # persist settings so the next GUI session restores them
            pickle.dump(self.settings, file("learnweights.config.dat", "w+"))

            # hide gui
            self.master.withdraw()

            if self.settings["engine"] == "internal":  # internal engine
                # load MLN and training database
                mln = MLN.MLN(self.settings["mln"])
                mln.combineDB(self.settings["db"])
                # arguments
                args = {"initialWts": False}
                # NOTE(review): 'params' comes straight from a GUI text field
                # and is eval'd -- trusted-input assumption; confirm callers
                args.update(eval("dict(%s)" %
                                 params))  # add additional parameters
                # learn weights
                mln.learnwts(MLN.ParameterLearningMeasures._byName[method],
                             **args)
                # determine output filename
                fname = self.settings["output_filename"]
                mln.write(file(fname, "w"))
                print "\nWROTE %s\n\n" % fname
                #mln.write(sys.stdout)
            else:  # Alchemy
                alchemy_version = self.alchemy_versions[
                    self.selected_engine.get()]
                if type(alchemy_version) != dict:
                    alchemy_version = {"path": str(alchemy_version)}
                # run Alchemy's learnwts
                method_switch = self.alchemy_methods[method]
                params = '%s -i "%s" -o "%s" -t %s %s' % (
                    method_switch, self.settings["mln"],
                    self.settings["output_filename"], self.settings["db"],
                    params)
                path = alchemy_version["path"]
                path2 = os.path.join(path, "bin")
                if os.path.exists(path2):
                    path = path2
                command = '%s %s' % (os.path.join(path, "learnwts"), params)
                print "\n", command, "\n"
                self.master.withdraw()  # hide gui
                # run learnwts, echoing and collecting its output line by line
                cin, cout = os.popen2(command)
                output_text = ""
                while True:
                    l = cout.readline()
                    if l == "":
                        break
                    print l,
                    output_text += l

                # add data reported by learnwts and, from the input mln, domain declarations and rules for mutual exclusiveness and exhaustiveness
                if True:
                    # read the input file
                    f = file(self.settings["mln"], "r")
                    text = f.read()
                    f.close()
                    # strip // line comments and /* */ block comments
                    comment = re.compile(r'//.*?^|/\*.*\*/',
                                         re.DOTALL | re.MULTILINE)
                    text = re.sub(comment, '', text)
                    merules = []
                    domain_decls = []
                    for l in text.split("\n"):
                        l = l.strip()
                        # domain decls
                        if "{" in l:
                            domain_decls.append(l)
                        # mutex rules
                        m = re.match(r"\w+\((.*?)\)", l)
                        if m != None and m.group(0) == l and ("!"
                                                              in m.group(1)):
                            merules.append(m.group(0))
                    # read the output file
                    f = file(self.settings["output_filename"], "r")
                    outfile = f.read()
                    f.close()
                    # rewrite the output file
                    f = file(self.settings["output_filename"], "w")
                    # - get report with command line and learnwts output
                    if config.learnwts_full_report:
                        report = output_text
                    else:
                        report = output_text[output_text.
                                             rfind("Computing counts took"):]
                    report = "/*\n%s\n\n%s*/\n" % (command, report)
                    # - write
                    outfile = outfile.replace("//function declarations",
                                              "\n".join(merules))
                    if not config.learnwts_report_bottom:
                        f.write(report + "\n")
                    f.write("// domain declarations\n" +
                            "\n".join(domain_decls) + "\n\n")
                    f.write(outfile)
                    if config.learnwts_report_bottom: f.write("\n\n" + report)
                    f.close()

                # open the learned MLN in the configured editor
                editor = config.editor
                print "starting editor %s %s" % (
                    editor, self.settings["output_filename"])
                os.spawnl(os.P_NOWAIT, editor, editor,
                          self.settings["output_filename"])

            # restore gui
            self.master.deiconify()
            self.setGeometry()
        except:
            # report any failure without killing the GUI process
            cls, e, tb = sys.exc_info()
            print "Error: %s " % str(e)
            traceback.print_tb(tb)
Example #8
0
 def start(self):
     """Collect the GUI's current inference settings and run the query.

     Persists the settings, then runs inference either with the internal
     PyMLNs engine or by invoking Alchemy's infer binary; results are
     written to the chosen output file, which is optionally opened in an
     editor, and the main window is restored afterwards.
     """
     #try:
     # get mln, db, qf and output filename
     mln = self.selected_mln.get()
     db = self.selected_db.get()
     qf = self.selected_qf.get()
     mln_text = self.selected_mln.get_text()
     db_text = self.selected_db.get_text()
     qf_text = self.selected_qf.get_text()
     output = self.output_filename.get()
     method = self.selected_method.get()
     keep_written_db = True
     params = self.params.get()
     # update settings
     self.settings["mln"] = mln
     self.settings["mln_rename"] = self.selected_mln.rename_on_edit.get()
     self.settings["db"] = db
     self.settings["db_rename"] = self.selected_db.rename_on_edit.get()
     self.settings["method%d" % int(self.internalMode)] = method
     self.settings["params%d" % int(self.internalMode)] = params
     self.settings["query"] = self.query.get()
     self.settings["engine"] = self.selected_engine.get()
     self.settings["qf"] = qf
     self.settings["output_filename"] = output
     self.settings["openWorld"] = self.open_world.get()
     self.settings["cwPreds"] = self.cwPreds.get()
     self.settings["convertAlchemy"] = self.convert_to_alchemy.get()
     self.settings["maxSteps"] = self.maxSteps.get()
     self.settings["numChains"] = self.numChains.get()
     self.settings["geometry"] = self.master.winfo_geometry()
     self.settings["saveResults"] = self.save_results.get()
     # write query
     # - to file (disabled by default)
     write_query_file = False
     if write_query_file:
         query_file = "%s.query" % db
         f = file(query_file, "w")
         f.write(self.settings["query"])
         f.close()
     # - to settings
     self.settings["queryByDB"][db] = self.settings["query"]
     # write settings
     pickle.dump(self.settings, file(configname, "w+"))
     # hide main window
     self.master.withdraw()
     # some information
     print "\n--- query ---\n%s" % self.settings["query"]
     print "\n--- evidence (%s) ---\n%s" % (db, db_text.strip())
     # engine
     haveOutFile = False
     if self.settings["engine"] == "internal":  # internal engine
         try:
             print "\nStarting %s...\n" % method
             # read queries: split on commas, but only accept a query once
             # its parentheses balance (commas may occur inside formulas)
             queries = []
             query = ""
             for s in map(str.strip, self.settings["query"].split(",")):
                 if query != "": query += ','
                 query += s
                 if MLN.balancedParentheses(query):
                     queries.append(query)
                     query = ""
             if query != "":
                 raise Exception("Unbalanced parentheses in queries!")
             # create MLN and evidence conjunction
             mln = MLN.MLN(
                 mln,
                 verbose=True,
                 defaultInferenceMethod=MLN.InferenceMethods._byName.get(
                     method))
             evidence = MLN.evidence2conjunction(
                 mln.combineDB(db, verbose=True))
             # set closed-world predicates
             cwPreds = map(str.strip, self.settings["cwPreds"].split(","))
             for pred in cwPreds:
                 if pred != "": mln.setClosedWorldPred(pred)
             # collect inference arguments
             args = {
                 "details": True,
                 "verbose": True,
                 "shortOutput": True,
                 "debugLevel": 1
             }
             # NOTE(review): 'params' comes from a GUI text field and is
             # eval'd -- trusted-input assumption
             args.update(eval("dict(%s)" %
                              params))  # add additional parameters
             if args.get("debug", False) and args["debugLevel"] > 1:
                 print "\nground formulas:"
                 mln.printGroundFormulas()
                 print
             if self.settings["numChains"] != "":
                 args["numChains"] = int(self.settings["numChains"])
             if self.settings["maxSteps"] != "":
                 args["maxSteps"] = int(self.settings["maxSteps"])
             outFile = None
             if self.settings["saveResults"]:
                 haveOutFile = True
                 outFile = file(output, "w")
                 args["outFile"] = outFile
             # check for print requests
             if "printGroundAtoms" in args:
                 mln.printGroundAtoms()
             # invoke inference
             results = mln.infer(queries, evidence, **args)
             # close output file and open if requested
             if outFile != None:
                 outFile.close()
         except:
             # report the failure; GUI is restored below either way
             cls, e, tb = sys.exc_info()
             sys.stderr.write("Error: %s\n" % str(e))
             traceback.print_tb(tb)
     else:  # engine is Alchemy
         haveOutFile = True
         infile = mln
         mlnObject = None
         # explicitly convert MLN to Alchemy format, i.e. resolve weights that are arithm. expressions (on request) -> create temporary file
         if self.settings["convertAlchemy"]:
             print "\n--- temporary MLN ---\n"
             mlnObject = MLN.MLN(mln)
             infile = mln[:mln.rfind(".")] + ".alchemy.mln"
             f = file(infile, "w")
             mlnObject.write(f)
             f.close()
             mlnObject.write(sys.stdout)
             print "\n---"
         # get alchemy version-specific data
         alchemy_version = self.alchemy_versions[self.selected_engine.get()]
         print alchemy_version
         print type(alchemy_version)
         if type(alchemy_version) != dict:
             alchemy_version = {"path": str(alchemy_version)}
         usage = config.default_infer_usage
         if "usage" in alchemy_version:
             usage = alchemy_version["usage"]
         # parse additional parameters for input files (-i flags are pulled
         # out of the free-form params and merged into the input file list)
         input_files = [infile]
         add_params = params.split()
         i = 0
         while i < len(add_params):
             if add_params[i] == "-i":
                 input_files.append(add_params[i + 1])
                 del add_params[i]
                 del add_params[i]
                 continue
             i += 1
         # create command to execute
         params = ' -i "%s" -e "%s" -r "%s" -q "%s" %s %s' % (
             ",".join(input_files), db, output, self.settings["query"],
             self.alchemy_methods[method], " ".join(add_params))
         if self.settings["numChains"] != "":
             params += " %s %s" % (usage["numChains"],
                                   self.settings["numChains"])
         if self.settings["maxSteps"] != "":
             params += " %s %s" % (usage["maxSteps"],
                                   self.settings["maxSteps"])
         path = alchemy_version["path"]
         path2 = os.path.join(path, "bin")
         if os.path.exists(path2):
             path = path2
         if self.settings["openWorld"] == 1:
             print "\nFinding predicate names..."
             if mlnObject is None:
                 mlnObject = MLN.MLN(mln)
             params += " %s %s" % (usage["openWorld"], ",".join(
                 mlnObject.predicates))
         command = '%s %s' % (os.path.join(path, "infer"), params)
         # remove old output file (if any)
         if os.path.exists(output):
             os.remove(output)
         # execute
         print "\nStarting Alchemy..."
         print "\ncommand:\n%s\n" % command
         t_start = time.time()
         os.system(command)
         t_taken = time.time() - t_start
         # print results file
         if True:
             print "\n\n--- output ---\n"
             os.system("cat %s" % output)
             print "\n"
         # append information on query and mln to results file
         f = file(output, "a")
         f.write(
             "\n\n/*\n\n--- command ---\n%s\n\n--- evidence ---\n%s\n\n--- mln ---\n%s\ntime taken: %fs\n\n*/"
             % (command, db_text.strip(), mln_text.strip(), t_taken))
         f.close()
         # delete written db
         # NOTE(review): 'wrote_db' is never defined in this method; this is
         # only safe because keep_written_db is hard-coded to True, so the
         # 'and' short-circuits -- verify before changing keep_written_db
         if not (keep_written_db) and wrote_db:
             os.unlink(db)
         # delete temporary mln
         if self.settings["convertAlchemy"] and not config_value(
                 "keep_alchemy_conversions", True):
             os.unlink(infile)
     # open results file
     if haveOutFile and config.query_edit_outfile_when_done:
         editor = config.editor
         print 'starting editor: %s %s' % (editor, output)
         run = os.spawnl
         if "spawnlp" in dir(os):
             run = os.spawnlp
         run(os.P_NOWAIT, editor, editor, output)
     # restore main window
     self.master.deiconify()
     self.setGeometry()
     # reload the files (in case they changed)
     self.selected_mln.reloadFile()
     self.selected_db.reloadFile()
Example #9
0
    def run(self, mlnFiles, evidenceDB, method, queries, engine="PyMLNs", output_filename=None, params="", **settings):
        '''
            runs an MLN inference method with the given parameters
        
            mlnFiles: list of one or more MLN input files
            evidenceDB: name of the MLN database file from which to read evidence data
            engine: either "PyMLNs"/"internal", "J-MLNs" or one of the keys in the configured Alchemy versions (see configMLN.py)
            method: name of the inference method
            queries: comma-separated list of queries
            output_filename (compulsory only when using Alchemy): name of the file to which to save results
                For the internal engine, specify saveResults=True as an additional settings to save the results
            params: additional parameters to pass to inference method. For the internal engine, it is a comma-separated
                list of assignments to parameters (dictionary-type string), for the others it's just a string of command-line
                options to pass on
            settings: additional settings that control the inference process, which are usually set by the GUI (see code)
                
            returns a mapping (dictionary) from ground atoms to probability values.
                For J-MLNs, results are only returned if settings are saved to a file (settings["saveResults"]=True and output_filename given)
        '''
        self.settings = dict(self.default_settings)        
        self.settings.update(settings)
        input_files = mlnFiles
        db = evidenceDB
        query = queries
        
        results_suffix = ".results"
        output_base_filename = output_filename
        if output_base_filename[-len(results_suffix):] == results_suffix:
            output_base_filename = output_base_filename[:-len(results_suffix)]
        
        # determine closed-world preds
        cwPreds = []
        if "cwPreds" in self.settings:            
            cwPreds = filter(lambda x: x != "", map(str.strip, self.settings["cwPreds"].split(",")))
        haveOutFile = False
        results = None
        
        # engine-specific handling
        if engine in ("internal", "PyMLNs"): 
            try:
                print "\nStarting %s...\n" % method
                
                # read queries
                queries = []
                q = ""
                for s in map(str.strip, query.split(",")):
                    if q != "": q += ','
                    q += s
                    if MLN.balancedParentheses(q):
                        queries.append(q)
                        q = ""
                if q != "": raise Exception("Unbalanced parentheses in queries!")
                
                # create MLN
                verbose = True
                mln = MLN.MLN(input_files, verbose=verbose, defaultInferenceMethod=MLN.InferenceMethods.byName(method))
                
                # set closed-world predicates
                for pred in cwPreds:
                    mln.setClosedWorldPred(pred)
                
                # create ground MRF
                mrf = mln.groundMRF(db, verbose=verbose)
                
                # collect inference arguments
                args = {"details":True, "verbose":verbose, "shortOutput":True, "debugLevel":1}
                args.update(eval("dict(%s)" % params)) # add additional parameters
                if args.get("debug", False) and args["debugLevel"] > 1:
                    print "\nground formulas:"
                    mrf.printGroundFormulas()
                    print
                if self.settings["numChains"] != "":
                    args["numChains"] = int(self.settings["numChains"])
                if self.settings["maxSteps"] != "":
                    args["maxSteps"] = int(self.settings["maxSteps"])
                outFile = None
                if self.settings["saveResults"]:
                    haveOutFile = True
                    outFile = file(output_filename, "w")
                    args["outFile"] = outFile
                args["probabilityFittingResultFileName"] = output_base_filename + "_fitted.mln"

                # check for print/write requests
                if "printGroundAtoms" in args:
                    if args["printGroundAtoms"]:
                        mrf.printGroundAtoms()
                if "printGroundFormulas" in args:
                    if args["printGroundFormulas"]:
                        mrf.printGroundFormulas()
                if "writeGraphML" in args:
                    if args["writeGraphML"]:
                        graphml_filename = output_base_filename + ".graphml"
                        print "writing ground MRF as GraphML to %s..." % graphml_filename
                        mrf.writeGraphML(graphml_filename)
                    
                # invoke inference and retrieve results
                mrf.infer(queries, **args)
                results = {}
                for gndFormula, p in mrf.getResultsDict().iteritems():
                    results[str(gndFormula)] = p
                
                # close output file and open if requested
                if outFile != None:
                    outFile.close()
            except:
                cls, e, tb = sys.exc_info()
                sys.stderr.write("Error: %s\n" % str(e))
                traceback.print_tb(tb)
                
        elif engine == "J-MLNs": # engine is J-MLNs (ProbCog's Java implementation)
            
            # create command to execute
            app = "MLNinfer"
            params = [app, "-i", ",".join(input_files), "-e", db, "-q", query, self.jmlns_methods[method]] + shlex.split(params)
            if self.settings["saveResults"]:
                params += ["-r", output_filename]
            if self.settings["maxSteps"] != "":
                params += ["-maxSteps", self.settings["maxSteps"]]
            if len(cwPreds) > 0:
                params += ["-cw", ",".join(cwPreds)]
            outFile = None
            if self.settings["saveResults"]:
                outFile = output_filename
                params += ["-r", outFile]
            
            # execute
            params = map(str, params)
            print "\nStarting J-MLNs..."
            print "\ncommand:\n%s\n" % " ".join(params)
            t_start = time.time()
            call(params)
            t_taken = time.time() - t_start
            
            if outFile is not None:
                results = dict(readAlchemyResults(outFile))
        
        else: # engine is Alchemy
            haveOutFile = True
            infile = mlnFiles[0]
            mlnObject = None
            # explicitly convert MLN to Alchemy format, i.e. resolve weights that are arithm. expressions (on request) -> create temporary file
            if self.settings["convertAlchemy"]:
                print "\n--- temporary MLN ---\n"
                mlnObject = MLN.MLN(input_files)
                infile = input_files[0]
                infile = infile[:infile.rfind(".")]+".alchemy.mln"
                f = file(infile, "w")
                mlnObject.write(f)
                f.close()
                mlnObject.write(sys.stdout)
                input_files = [infile]
                print "\n---"
            # get alchemy version-specific data
            alchemy_version = self.alchemy_versions[engine]
            if type(alchemy_version) != dict:
                alchemy_version = {"path": str(alchemy_version)}
            usage = config.default_infer_usage
            if "usage" in alchemy_version:
                usage = alchemy_version["usage"]
            # find alchemy binary
            path = alchemy_version["path"]
            path2 = os.path.join(path, "bin")
            if os.path.exists(path2):
                path = path2
            alchemyInfer = os.path.join(path, "infer")
            if not os.path.exists(alchemyInfer) and not os.path.exists(alchemyInfer+".exe"):
                error = "Alchemy's infer/infer.exe binary not found in %s. Please configure Alchemy in python/configMLN.py" % path
                tkMessageBox.showwarning("Error", error)
                raise Exception(error)
            # parse additional parameters for input files
            add_params = shlex.split(params)
            i = 0
            while i < len(add_params):
                if add_params[i] == "-i":
                    input_files.append(add_params[i+1])
                    del add_params[i]
                    del add_params[i]
                    continue
                i += 1
            # create command to execute
            if output_filename is None: raise Exception("For Alchemy, provide an output filename!")            
            params = [alchemyInfer, "-i", ",".join(input_files), "-e", db, "-q", query, "-r", output_filename, self.alchemy_methods[method]] + add_params            
            if self.settings["numChains"] != "":
                params += [usage["numChains"], self.settings["numChains"]]
            if self.settings["maxSteps"] != "":
                params += [usage["maxSteps"], self.settings["maxSteps"]]
            owPreds = []
            if self.settings["openWorld"]:
                print "\nFinding predicate names..."
                preds = MLN.getPredicateList(infile)
                owPreds = filter(lambda x: x not in cwPreds, preds)
                params += [usage["openWorld"], ",".join(owPreds)]
            if len(cwPreds) > 0:
                params += ["-cw", ",".join(cwPreds)]
            # remove old output file (if any)
            if os.path.exists(output_filename):
                os.remove(output_filename)
                pass
            # execute
            params = map(str, params)
            print "\nStarting Alchemy..."
            command = subprocess.list2cmdline(params)
            print "\ncommand:\n%s\n" % " ".join(params)
            t_start = time.time()
            call(params)
            t_taken = time.time() - t_start
            # print results file
            if True:
                print "\n\n--- output ---\n"
                results = dict(readAlchemyResults(output_filename))
                for atom, prob in results.iteritems():
                    print "%.4f  %s" % (prob, atom)                    
                print "\n"
            # append information on query and mln to results file
            f = file(output_filename, "a")
            dbfile = file(db, "r")
            db_text = dbfile.read()
            dbfile.close()
            infile = file(infile, "r")
            mln_text = infile.read()
            infile.close()
            f.write("\n\n/*\n\n--- command ---\n%s\n\n--- evidence ---\n%s\n\n--- mln ---\n%s\ntime taken: %fs\n\n*/" % (command, db_text.strip(), mln_text.strip(), t_taken))
            f.close()
            # delete temporary mln
            if self.settings["convertAlchemy"] and not config_value("keep_alchemy_conversions", True):
                os.unlink(infile)
                
        # open output file in editor
        if False and haveOutFile and config.query_edit_outfile_when_done: # this is mostly useless
            editor = config.editor
            params = [editor, output_filename]
            print 'starting editor: %s' % subprocess.list2cmdline(params)
            subprocess.Popen(params, shell=False)
            
        return results