Example #1
0
    def run(self, mlnFiles, evidenceDB, method, queries, engine="PyMLNs", output_filename="", params="{}", **settings):
        '''
            Runs an MLN inference method with the given parameters.
        
            mlnFiles: list of one or more MLN input files
            evidenceDB: name of the MLN database file from which to read evidence data
            engine: either "PyMLNs"/"internal", "J-MLNs" or one of the keys in the configured Alchemy versions (see configMLN.py)
            method: name of the inference method
            queries: comma-separated list of queries
            output_filename (compulsory only when using Alchemy): name of the file to which to save results.
                For the internal engine, specify saveResults=True as an additional setting to save the results.
            params: additional parameters to pass to the inference method. For the internal engine, this is a comma-separated
                list of parameter assignments (a dictionary-style string); for the other engines, it is a string of command-line
                options to pass on.
            settings: additional settings that control the inference process, usually set by the GUI (see code)
                
            returns a mapping (dictionary) from ground atoms to probability values.
                For J-MLNs, results are only returned if they are saved to a file (settings["saveResults"]=True and output_filename given).
        '''
        self.settings = dict(self.default_settings)        
        self.settings.update(settings)
        input_files = mlnFiles
        db = evidenceDB
        query = ",".join(queries)
        
        results_suffix = ".results"
        output_base_filename = output_filename
        if output_base_filename[-len(results_suffix):] == results_suffix:
            output_base_filename = output_base_filename[:-len(results_suffix)]
        
        # determine closed-world preds
        cwPreds = []
        if "cwPreds" in self.settings:
            cwPreds = filter(lambda x: x != "", map(str.strip, self.settings["cwPreds"].split(",")))
        haveOutFile = False
        results = None
        
        # engine-specific handling
        if engine in ("internal", "PyMLNs"): 
            try:
                print "\nStarting %s...\n" % method
                print "\nqueries: %s...\n" % queries
                
                # read queries
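                # (query fragments split at commas inside parentheses are re-joined below until parentheses balance,
                #  so queries such as "similar(x, y)" stay intact)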
                _queries = []
                q = ""
                for s in queries:
                    if q != "": q += ','
                    q += s
                    if MLN.balancedParentheses(q):
                        _queries.append(q)
                        q = ""
                print "\n_queries: %s...\n" % _queries
                if q != "": raise Exception("Unbalanced parentheses in queries!")
                
                # create MLN
                verbose = True
                print input_files
                mln = MLN.MLN(input_files, verbose=verbose, defaultInferenceMethod=MLN.InferenceMethods.byName(method))
                
                # set closed-world predicates
                for pred in cwPreds:
                    mln.setClosedWorldPred(pred)
                
                # create ground MRF
                mrf = mln.groundMRF(db, verbose=verbose)
                
                # collect inference arguments
                args = {"details":True, "verbose":verbose, "shortOutput":True, "debugLevel":1}
                args.update(eval("dict(%s)" % params)) # add additional parameters
                if args.get("debug", False) and args["debugLevel"] > 1:
                    print "\nground formulas:"
                    mrf.printGroundFormulas()
                    print
                if self.settings["numChains"] != "":
                    args["numChains"] = int(self.settings["numChains"])
                if self.settings["maxSteps"] != "":
                    args["maxSteps"] = int(self.settings["maxSteps"])
                outFile = None
                if self.settings["saveResults"]:
                    haveOutFile = True
                    outFile = file(output_filename, "w")
                    args["outFile"] = outFile
                args["probabilityFittingResultFileName"] = output_base_filename + "_fitted.mln"

                # check for print/write requests
                if args.get("printGroundAtoms", False):
                    mrf.printGroundAtoms()
                if args.get("printGroundFormulas", False):
                    mrf.printGroundFormulas()
                if args.get("writeGraphML", False):
                    graphml_filename = output_base_filename + ".graphml"
                    print "writing ground MRF as GraphML to %s..." % graphml_filename
                    mrf.writeGraphML(graphml_filename)
                    
                # invoke inference and retrieve results
                mrf.infer(_queries, **args)
                results = {}
                for gndFormula, p in mrf.getResultsDict().iteritems():
                    results[str(gndFormula)] = p
                
                # close output file, if one was opened
                if outFile != None:
                    outFile.close()
            except:
                cls, e, tb = sys.exc_info()
                sys.stderr.write("Error: %s\n" % str(e))
                traceback.print_tb(tb)
            
        return results
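A minimal usage sketch for the run() method above. It assumes an inference wrapper class (here hypothetically named MLNInfer) that exposes run() and whose default_settings provide the keys the method reads (numChains, maxSteps, saveResults); the file names, method name, queries and parameter values are illustrative only.

# Hypothetical usage sketch (assumptions: an MLNInfer-like wrapper class and the listed
# files existing on disk); the arguments mirror the docstring above.
inference = MLNInfer()
results = inference.run(
    ["wts.smoking.mln"],             # mlnFiles: one or more MLN files (illustrative name)
    "smoking-test.db",               # evidenceDB: evidence database (illustrative name)
    "MC-SAT",                        # method: inference method name (illustrative)
    ["Smokes(Ana)", "Cancer(Bob)"],  # queries: list of query atoms
    engine="internal",               # use the PyMLNs/internal engine
    params="maxSteps=5000",          # dictionary-style parameter string (internal engine)
    saveResults=False)               # extra settings are passed through **settings
if results is not None:
    for atom, p in results.iteritems():
        print "%.4f  %s" % (p, atom)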
Example #2
0
 def start(self):
     #try:
     # get mln, db, qf and output filename
     mln = self.selected_mln.get()
     db = self.selected_db.get()
     qf = self.selected_qf.get()
     mln_text = self.selected_mln.get_text()
     db_text = self.selected_db.get_text()
     qf_text = self.selected_qf.get_text()
     output = self.output_filename.get()
     method = self.selected_method.get()
     keep_written_db = True
     params = self.params.get()
     # update settings
     self.settings["mln"] = mln
     self.settings["mln_rename"] = self.selected_mln.rename_on_edit.get()
     self.settings["db"] = db
     self.settings["db_rename"] = self.selected_db.rename_on_edit.get()
     self.settings["method%d" % int(self.internalMode)] = method
     self.settings["params%d" % int(self.internalMode)] = params
     self.settings["query"] = self.query.get()
     self.settings["engine"] = self.selected_engine.get()
     self.settings["qf"] = qf
     self.settings["output_filename"] = output
     self.settings["openWorld"] = self.open_world.get()
     self.settings["cwPreds"] = self.cwPreds.get()
     self.settings["convertAlchemy"] = self.convert_to_alchemy.get()
     self.settings["maxSteps"] = self.maxSteps.get()
     self.settings["numChains"] = self.numChains.get()
     self.settings["geometry"] = self.master.winfo_geometry()
     self.settings["saveResults"] = self.save_results.get()
     # write query
     # - to file
     write_query_file = False
     if write_query_file:
         query_file = "%s.query" % db
         f = file(query_file, "w")
         f.write(self.settings["query"])
         f.close()
     # - to settings
     self.settings["queryByDB"][db] = self.settings["query"]
     # write settings
     pickle.dump(self.settings, file(configname, "w+"))
     # hide main window
     self.master.withdraw()
     # some information
     print "\n--- query ---\n%s" % self.settings["query"]
     print "\n--- evidence (%s) ---\n%s" % (db, db_text.strip())
     # engine
     haveOutFile = False
     if self.settings["engine"] == "internal":  # internal engine
         try:
             print "\nStarting %s...\n" % method
             # read queries
             queries = []
             query = ""
             for s in map(str.strip, self.settings["query"].split(",")):
                 if query != "": query += ','
                 query += s
                 if MLN.balancedParentheses(query):
                     queries.append(query)
                     query = ""
             if query != "":
                 raise Exception("Unbalanced parentheses in queries!")
             # create MLN and evidence conjunction
             mln = MLN.MLN(
                 mln,
                 verbose=True,
                 defaultInferenceMethod=MLN.InferenceMethods._byName.get(
                     method))
             evidence = MLN.evidence2conjunction(
                 mln.combineDB(db, verbose=True))
             # set closed-world predicates
             cwPreds = map(str.strip, self.settings["cwPreds"].split(","))
             for pred in cwPreds:
                 if pred != "": mln.setClosedWorldPred(pred)
             # collect inference arguments
             args = {
                 "details": True,
                 "verbose": True,
                 "shortOutput": True,
                 "debugLevel": 1
             }
             args.update(eval("dict(%s)" %
                              params))  # add additional parameters
             if args.get("debug", False) and args["debugLevel"] > 1:
                 print "\nground formulas:"
                 mln.printGroundFormulas()
                 print
             if self.settings["numChains"] != "":
                 args["numChains"] = int(self.settings["numChains"])
             if self.settings["maxSteps"] != "":
                 args["maxSteps"] = int(self.settings["maxSteps"])
             outFile = None
             if self.settings["saveResults"]:
                 haveOutFile = True
                 outFile = file(output, "w")
                 args["outFile"] = outFile
             # check for print requests
             if "printGroundAtoms" in args:
                 mln.printGroundAtoms()
             # invoke inference
             results = mln.infer(queries, evidence, **args)
             # close output file and open if requested
             if outFile != None:
                 outFile.close()
         except:
             cls, e, tb = sys.exc_info()
             sys.stderr.write("Error: %s\n" % str(e))
             traceback.print_tb(tb)
     else:  # engine is Alchemy
         haveOutFile = True
         infile = mln
         mlnObject = None
         # explicitly convert MLN to Alchemy format, i.e. resolve weights that are arithm. expressions (on request) -> create temporary file
         if self.settings["convertAlchemy"]:
             print "\n--- temporary MLN ---\n"
             mlnObject = MLN.MLN(mln)
             infile = mln[:mln.rfind(".")] + ".alchemy.mln"
             f = file(infile, "w")
             mlnObject.write(f)
             f.close()
             mlnObject.write(sys.stdout)
             print "\n---"
         # get alchemy version-specific data
         alchemy_version = self.alchemy_versions[self.selected_engine.get()]
         print alchemy_version
         print type(alchemy_version)
         if type(alchemy_version) != dict:
             alchemy_version = {"path": str(alchemy_version)}
         usage = config.default_infer_usage
         if "usage" in alchemy_version:
             usage = alchemy_version["usage"]
         # parse additional parameters for input files
         input_files = [infile]
         add_params = params.split()
         i = 0
         while i < len(add_params):
             if add_params[i] == "-i":
                 input_files.append(add_params[i + 1])
                 del add_params[i]
                 del add_params[i]
                 continue
             i += 1
         # create command to execute
         params = ' -i "%s" -e "%s" -r "%s" -q "%s" %s %s' % (
             ",".join(input_files), db, output, self.settings["query"],
             self.alchemy_methods[method], " ".join(add_params))
         if self.settings["numChains"] != "":
             params += " %s %s" % (usage["numChains"],
                                   self.settings["numChains"])
         if self.settings["maxSteps"] != "":
             params += " %s %s" % (usage["maxSteps"],
                                   self.settings["maxSteps"])
         path = alchemy_version["path"]
         path2 = os.path.join(path, "bin")
         if os.path.exists(path2):
             path = path2
         if self.settings["openWorld"] == 1:
             print "\nFinding predicate names..."
             if mlnObject is None:
                 mlnObject = MLN.MLN(mln)
             params += " %s %s" % (usage["openWorld"], ",".join(
                 mlnObject.predicates))
         command = '%s %s' % (os.path.join(path, "infer"), params)
         # remove old output file (if any)
         if os.path.exists(output):
             os.remove(output)
         # execute
         print "\nStarting Alchemy..."
         print "\ncommand:\n%s\n" % command
         t_start = time.time()
         os.system(command)
         t_taken = time.time() - t_start
         # print results file
         if True:
             print "\n\n--- output ---\n"
             os.system("cat %s" % output)
             print "\n"
         # append information on query and mln to results file
         f = file(output, "a")
         f.write(
             "\n\n/*\n\n--- command ---\n%s\n\n--- evidence ---\n%s\n\n--- mln ---\n%s\ntime taken: %fs\n\n*/"
             % (command, db_text.strip(), mln_text.strip(), t_taken))
         f.close()
         # delete written db
         if not (keep_written_db) and wrote_db:
             os.unlink(db)
         # delete temporary mln
         if self.settings["convertAlchemy"] and not config_value(
                 "keep_alchemy_conversions", True):
             os.unlink(infile)
     # open results file
     if haveOutFile and config.query_edit_outfile_when_done:
         editor = config.editor
         print 'starting editor: %s %s' % (editor, output)
         run = os.spawnl
         if "spawnlp" in dir(os):
             run = os.spawnlp
         run(os.P_NOWAIT, editor, editor, output)
     # restore main window
     self.master.deiconify()
     self.setGeometry()
     # reload the files (in case they changed)
     self.selected_mln.reloadFile()
     self.selected_db.reloadFile()
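The query parsing in start() splits the comma-separated query string and re-joins fragments until parentheses are balanced, so that queries such as "similar(x, y)" are not broken at the inner comma. A standalone sketch of that idea, with a simple local balance check standing in for MLN.balancedParentheses:

# Standalone sketch of the comma-splitting used in start(); balanced() is a local
# stand-in for MLN.balancedParentheses and only counts round brackets.
def balanced(s):
    depth = 0
    for c in s:
        if c == '(':
            depth += 1
        elif c == ')':
            depth -= 1
            if depth < 0:
                return False
    return depth == 0

def split_queries(query_string):
    queries, current = [], ""
    for fragment in map(str.strip, query_string.split(",")):
        if current != "":
            current += ","
        current += fragment
        if balanced(current):
            queries.append(current)
            current = ""
    if current != "":
        raise Exception("Unbalanced parentheses in queries!")
    return queries

print split_queries("Smokes(Ana), similar(A, B), Cancer(x)")
# -> ['Smokes(Ana)', 'similar(A,B)', 'Cancer(x)']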
Example #3
0
 def start(self):
     #try:
         # get mln, db, qf and output filename
         mln = self.selected_mln.get()
         db = self.selected_db.get()
         qf = self.selected_qf.get()
         mln_text = self.selected_mln.get_text()
         db_text = self.selected_db.get_text()
         qf_text = self.selected_qf.get_text()
         output = self.output_filename.get()
         method = self.selected_method.get()
         keep_written_db = True
         params = self.params.get()
         # update settings
         self.settings["mln"] = mln
         self.settings["mln_rename"] = self.selected_mln.rename_on_edit.get()
         self.settings["db"] = db
         self.settings["db_rename"] = self.selected_db.rename_on_edit.get()
         self.settings["method%d" % int(self.internalMode)] = method
         self.settings["params%d" % int(self.internalMode)] = params
         self.settings["query"] = self.query.get()
         self.settings["engine"] = self.selected_engine.get()
         self.settings["qf"] = qf
         self.settings["output_filename"] = output
         self.settings["openWorld"] = self.open_world.get()
         self.settings["cwPreds"] = self.cwPreds.get()
         self.settings["convertAlchemy"] = self.convert_to_alchemy.get()
         self.settings["maxSteps"] = self.maxSteps.get()
         self.settings["numChains"] = self.numChains.get()
         self.settings["geometry"] = self.master.winfo_geometry()
         self.settings["saveResults"] = self.save_results.get()
         # write query
         # - to file
         write_query_file = False
         if write_query_file:
             query_file = "%s.query" % db
             f = file(query_file, "w")
             f.write(self.settings["query"])
             f.close()
         # - to settings
         self.settings["queryByDB"][db] = self.settings["query"]
         # write settings
         pickle.dump(self.settings, file(configname, "w+"))
         # hide main window
         self.master.withdraw()
         # some information
         print "\n--- query ---\n%s" % self.settings["query"]
         print "\n--- evidence (%s) ---\n%s" % (db, db_text.strip())
         # engine
         haveOutFile = False
         if self.settings["engine"] == "internal": # internal engine
             try: 
                 print "\nStarting %s...\n" % method
                 # read queries
                 queries = []
                 query = ""
                 for s in map(str.strip, self.settings["query"].split(",")):
                     if query != "": query += ','
                     query += s
                     if MLN.balancedParentheses(query):
                         queries.append(query)
                         query = ""
                 if query != "": raise Exception("Unbalanced parentheses in queries!")
                 # create MLN and evidence conjunction
                 mln = MLN.MLN(mln, verbose=True, defaultInferenceMethod=MLN.InferenceMethods._byName.get(method))
                 evidence = MLN.evidence2conjunction(mln.combineDB(db, verbose=True))
                 # set closed-world predicates
                 cwPreds = map(str.strip, self.settings["cwPreds"].split(","))
                 for pred in cwPreds:
                     if pred != "": mln.setClosedWorldPred(pred)
                 # collect inference arguments
                 args = {"details":True, "verbose":True, "shortOutput":True, "debugLevel":1}
                 args.update(eval("dict(%s)" % params)) # add additional parameters
                 if args.get("debug", False) and args["debugLevel"] > 1:
                     print "\nground formulas:"
                     mln.printGroundFormulas()
                     print
                 if self.settings["numChains"] != "":
                     args["numChains"] = int(self.settings["numChains"])
                 if self.settings["maxSteps"] != "":
                     args["maxSteps"] = int(self.settings["maxSteps"])
                 outFile = None
                 if self.settings["saveResults"]:
                     haveOutFile = True
                     outFile = file(output, "w")
                     args["outFile"] = outFile
                 # check for print requests
                 if "printGroundAtoms" in args:
                     mln.printGroundAtoms()
                 # invoke inference
                 results = mln.infer(queries, evidence, **args)
                 # close output file and open if requested
                 if outFile != None:
                     outFile.close()
             except:
                 cls, e, tb = sys.exc_info()
                 sys.stderr.write("Error: %s\n" % str(e))
                 traceback.print_tb(tb)
         else: # engine is Alchemy
             haveOutFile = True
             infile = mln
             mlnObject = None
             # explicitly convert MLN to Alchemy format, i.e. resolve weights that are arithm. expressions (on request) -> create temporary file
             if self.settings["convertAlchemy"]:
                 print "\n--- temporary MLN ---\n"
                 mlnObject = MLN.MLN(mln)
                 infile = mln[:mln.rfind(".")]+".alchemy.mln"
                 f = file(infile, "w")
                 mlnObject.write(f)
                 f.close()
                 mlnObject.write(sys.stdout)
                 print "\n---"
             # get alchemy version-specific data
             alchemy_version = self.alchemy_versions[self.selected_engine.get()]
             print alchemy_version
             print type(alchemy_version)
             if type(alchemy_version) != dict:
                 alchemy_version = {"path": str(alchemy_version)}
             usage = config.default_infer_usage
             if "usage" in alchemy_version:
                 usage = alchemy_version["usage"]
             # parse additional parameters for input files
             input_files = [infile]
             add_params = params.split()
             i = 0
             while i < len(add_params):
                 if add_params[i] == "-i":
                     input_files.append(add_params[i+1])
                     del add_params[i]
                     del add_params[i]
                     continue
                 i += 1
             # create command to execute
             params = ' -i "%s" -e "%s" -r "%s" -q "%s" %s %s' % (",".join(input_files), db, output, self.settings["query"], self.alchemy_methods[method], " ".join(add_params))
             if self.settings["numChains"] != "":
                 params += " %s %s" % (usage["numChains"], self.settings["numChains"])
             if self.settings["maxSteps"] != "":
                 params += " %s %s" % (usage["maxSteps"], self.settings["maxSteps"])
             path = alchemy_version["path"]
             path2 = os.path.join(path, "bin")
             if os.path.exists(path2):
                 path = path2
             if self.settings["openWorld"] == 1:
                 print "\nFinding predicate names..."
                 if mlnObject is None:
                     mlnObject = MLN.MLN(mln)
                 params += " %s %s" % (usage["openWorld"], ",".join(mlnObject.predicates))
             command = '%s %s' % (os.path.join(path, "infer"), params)
             # remove old output file (if any)
             if os.path.exists(output):
                 os.remove(output)
             # execute 
             print "\nStarting Alchemy..."
             print "\ncommand:\n%s\n" % command
             t_start = time.time()
             os.system(command)
             t_taken = time.time() - t_start
             # print results file
             if True:
                 print "\n\n--- output ---\n"
                 os.system("cat %s" % output)
                 print "\n"
             # append information on query and mln to results file
             f = file(output, "a")
             f.write("\n\n/*\n\n--- command ---\n%s\n\n--- evidence ---\n%s\n\n--- mln ---\n%s\ntime taken: %fs\n\n*/" % (command, db_text.strip(), mln_text.strip(), t_taken))
             f.close()
             # delete written db
             if not(keep_written_db) and wrote_db:
                 os.unlink(db)
             # delete temporary mln
             if self.settings["convertAlchemy"] and not config_value("keep_alchemy_conversions", True):
                 os.unlink(infile)
         # open results file
         if haveOutFile and config.query_edit_outfile_when_done:
             editor = config.editor
             print 'starting editor: %s %s' % (editor, output)
             run = os.spawnl
             if "spawnlp" in dir(os):
                 run = os.spawnlp
             run(os.P_NOWAIT, editor, editor, output)
         # restore main window
         self.master.deiconify()
         self.setGeometry()
         # reload the files (in case they changed)
         self.selected_mln.reloadFile()
         self.selected_db.reloadFile()
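Before assembling the Alchemy infer command, the Alchemy branch above pulls any additional "-i <file>" pairs out of the free-form parameter string so that all MLN files end up in a single comma-separated -i argument. A standalone sketch of that extraction step:

# Standalone sketch of the "-i" extraction performed before building the Alchemy command.
def extract_input_files(infile, params):
    input_files = [infile]
    add_params = params.split()
    i = 0
    while i < len(add_params):
        if add_params[i] == "-i":
            input_files.append(add_params[i + 1])
            # remove both the flag and its value
            del add_params[i]
            del add_params[i]
            continue
        i += 1
    return input_files, add_params

files, rest = extract_input_files("smoking.mln", "-i extra.mln -maxSteps 1000")
print files   # ['smoking.mln', 'extra.mln']
print rest    # ['-maxSteps', '1000']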
Example #4
0
    def run(self, mlnFiles, evidenceDB, method, queries, engine="PyMLNs", output_filename=None, params="", **settings):
        '''
            Runs an MLN inference method with the given parameters.
        
            mlnFiles: list of one or more MLN input files
            evidenceDB: name of the MLN database file from which to read evidence data
            engine: either "PyMLNs"/"internal", "J-MLNs" or one of the keys in the configured Alchemy versions (see configMLN.py)
            method: name of the inference method
            queries: comma-separated list of queries
            output_filename (compulsory only when using Alchemy): name of the file to which to save results.
                For the internal engine, specify saveResults=True as an additional setting to save the results.
            params: additional parameters to pass to the inference method. For the internal engine, this is a comma-separated
                list of parameter assignments (a dictionary-style string); for the other engines, it is a string of command-line
                options to pass on.
            settings: additional settings that control the inference process, usually set by the GUI (see code)
                
            returns a mapping (dictionary) from ground atoms to probability values.
                For J-MLNs, results are only returned if they are saved to a file (settings["saveResults"]=True and output_filename given).
        '''
        self.settings = dict(self.default_settings)        
        self.settings.update(settings)
        input_files = mlnFiles
        db = evidenceDB
        query = queries
        
        results_suffix = ".results"
        output_base_filename = output_filename
        if output_base_filename[-len(results_suffix):] == results_suffix:
            output_base_filename = output_base_filename[:-len(results_suffix)]
        
        # determine closed-world preds
        cwPreds = []
        if "cwPreds" in self.settings:            
            cwPreds = filter(lambda x: x != "", map(str.strip, self.settings["cwPreds"].split(",")))
        haveOutFile = False
        results = None
        
        # engine-specific handling
        if engine in ("internal", "PyMLNs"): 
            try:
                print "\nStarting %s...\n" % method
                
                # read queries
                queries = []
                q = ""
                for s in map(str.strip, query.split(",")):
                    if q != "": q += ','
                    q += s
                    if MLN.balancedParentheses(q):
                        queries.append(q)
                        q = ""
                if q != "": raise Exception("Unbalanced parentheses in queries!")
                
                # create MLN
                verbose = True
                mln = MLN.MLN(input_files, verbose=verbose, defaultInferenceMethod=MLN.InferenceMethods.byName(method))
                
                # set closed-world predicates
                for pred in cwPreds:
                    mln.setClosedWorldPred(pred)
                
                # create ground MRF
                mrf = mln.groundMRF(db, verbose=verbose)
                
                # collect inference arguments
                args = {"details":True, "verbose":verbose, "shortOutput":True, "debugLevel":1}
                args.update(eval("dict(%s)" % params)) # add additional parameters
                if args.get("debug", False) and args["debugLevel"] > 1:
                    print "\nground formulas:"
                    mrf.printGroundFormulas()
                    print
                if self.settings["numChains"] != "":
                    args["numChains"] = int(self.settings["numChains"])
                if self.settings["maxSteps"] != "":
                    args["maxSteps"] = int(self.settings["maxSteps"])
                outFile = None
                if self.settings["saveResults"]:
                    haveOutFile = True
                    outFile = file(output_filename, "w")
                    args["outFile"] = outFile
                args["probabilityFittingResultFileName"] = output_base_filename + "_fitted.mln"

                # check for print/write requests
                if args.get("printGroundAtoms", False):
                    mrf.printGroundAtoms()
                if args.get("printGroundFormulas", False):
                    mrf.printGroundFormulas()
                if args.get("writeGraphML", False):
                    graphml_filename = output_base_filename + ".graphml"
                    print "writing ground MRF as GraphML to %s..." % graphml_filename
                    mrf.writeGraphML(graphml_filename)
                    
                # invoke inference and retrieve results
                mrf.infer(queries, **args)
                results = {}
                for gndFormula, p in mrf.getResultsDict().iteritems():
                    results[str(gndFormula)] = p
                
                # close output file, if one was opened
                if outFile != None:
                    outFile.close()
            except:
                cls, e, tb = sys.exc_info()
                sys.stderr.write("Error: %s\n" % str(e))
                traceback.print_tb(tb)
                
        elif engine == "J-MLNs": # engine is J-MLNs (ProbCog's Java implementation)
            
            # create command to execute
            app = "MLNinfer"
            params = [app, "-i", ",".join(input_files), "-e", db, "-q", query, self.jmlns_methods[method]] + shlex.split(params)
            if self.settings["saveResults"]:
                params += ["-r", output_filename]
            if self.settings["maxSteps"] != "":
                params += ["-maxSteps", self.settings["maxSteps"]]
            if len(cwPreds) > 0:
                params += ["-cw", ",".join(cwPreds)]
            outFile = None
            if self.settings["saveResults"]:
                outFile = output_filename
                params += ["-r", outFile]
            
            # execute
            params = map(str, params)
            print "\nStarting J-MLNs..."
            print "\ncommand:\n%s\n" % " ".join(params)
            t_start = time.time()
            call(params)
            t_taken = time.time() - t_start
            
            if outFile is not None:
                results = dict(readAlchemyResults(outFile))
        
        else: # engine is Alchemy
            haveOutFile = True
            infile = mlnFiles[0]
            mlnObject = None
            # explicitly convert MLN to Alchemy format, i.e. resolve weights that are arithm. expressions (on request) -> create temporary file
            if self.settings["convertAlchemy"]:
                print "\n--- temporary MLN ---\n"
                mlnObject = MLN.MLN(input_files)
                infile = input_files[0]
                infile = infile[:infile.rfind(".")]+".alchemy.mln"
                f = file(infile, "w")
                mlnObject.write(f)
                f.close()
                mlnObject.write(sys.stdout)
                input_files = [infile]
                print "\n---"
            # get alchemy version-specific data
            alchemy_version = self.alchemy_versions[engine]
            if type(alchemy_version) != dict:
                alchemy_version = {"path": str(alchemy_version)}
            usage = config.default_infer_usage
            if "usage" in alchemy_version:
                usage = alchemy_version["usage"]
            # find alchemy binary
            path = alchemy_version["path"]
            path2 = os.path.join(path, "bin")
            if os.path.exists(path2):
                path = path2
            alchemyInfer = os.path.join(path, "infer")
            if not os.path.exists(alchemyInfer) and not os.path.exists(alchemyInfer+".exe"):
                error = "Alchemy's infer/infer.exe binary not found in %s. Please configure Alchemy in python/configMLN.py" % path
                tkMessageBox.showwarning("Error", error)
                raise Exception(error)
            # parse additional parameters for input files
            add_params = shlex.split(params)
            i = 0
            while i < len(add_params):
                if add_params[i] == "-i":
                    input_files.append(add_params[i+1])
                    del add_params[i]
                    del add_params[i]
                    continue
                i += 1
            # create command to execute
            if output_filename is None: raise Exception("For Alchemy, provide an output filename!")            
            params = [alchemyInfer, "-i", ",".join(input_files), "-e", db, "-q", query, "-r", output_filename, self.alchemy_methods[method]] + add_params            
            if self.settings["numChains"] != "":
                params += [usage["numChains"], self.settings["numChains"]]
            if self.settings["maxSteps"] != "":
                params += [usage["maxSteps"], self.settings["maxSteps"]]
            owPreds = []
            if self.settings["openWorld"]:
                print "\nFinding predicate names..."
                preds = MLN.getPredicateList(infile)
                owPreds = filter(lambda x: x not in cwPreds, preds)
                params += [usage["openWorld"], ",".join(owPreds)]
            if len(cwPreds) > 0:
                params += ["-cw", ",".join(cwPreds)]
            # remove old output file (if any)
            if os.path.exists(output_filename):
                os.remove(output_filename)
            # execute
            params = map(str, params)
            print "\nStarting Alchemy..."
            command = subprocess.list2cmdline(params)
            print "\ncommand:\n%s\n" % " ".join(params)
            t_start = time.time()
            call(params)
            t_taken = time.time() - t_start
            # print results file
            if True:
                print "\n\n--- output ---\n"
                results = dict(readAlchemyResults(output_filename))
                for atom, prob in results.iteritems():
                    print "%.4f  %s" % (prob, atom)                    
                print "\n"
            # append information on query and mln to results file
            f = file(output_filename, "a")
            dbfile = file(db, "r")
            db_text = dbfile.read()
            dbfile.close()
            infile = file(infile, "r")
            mln_text = infile.read()
            infile.close()
            f.write("\n\n/*\n\n--- command ---\n%s\n\n--- evidence ---\n%s\n\n--- mln ---\n%s\ntime taken: %fs\n\n*/" % (command, db_text.strip(), mln_text.strip(), t_taken))
            f.close()
            # delete temporary mln
            if self.settings["convertAlchemy"] and not config_value("keep_alchemy_conversions", True):
                os.unlink(infile)
                
        # open output file in editor
        if False and haveOutFile and config.query_edit_outfile_when_done: # this is mostly useless
            editor = config.editor
            params = [editor, output_filename]
            print 'starting editor: %s' % subprocess.list2cmdline(params)
            subprocess.Popen(params, shell=False)
            
        return results
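A usage sketch for this version of run() when dispatching to an Alchemy engine. It assumes an inference wrapper object exposing this run() method (here hypothetically instantiated as MLNInfer()) whose default_settings supply the keys the Alchemy branch reads (openWorld, convertAlchemy, numChains, maxSteps, saveResults); the engine key must match an entry in self.alchemy_versions and the method name an entry in self.alchemy_methods, and all file names are illustrative.

# Hypothetical usage sketch (assumptions: an MLNInfer-like wrapper and an Alchemy
# installation configured in configMLN.py under the engine key used below).
inference = MLNInfer()
results = inference.run(
    ["smoking.mln"],                  # mlnFiles (illustrative)
    "smoking-test.db",                # evidenceDB (illustrative)
    "MC-SAT",                         # method key into self.alchemy_methods (illustrative)
    "Smokes(x), Cancer(x)",           # queries as a comma-separated string
    engine="Alchemy - August 2010",   # illustrative key into self.alchemy_versions
    output_filename="query.results",  # compulsory for Alchemy
    params="-maxSeconds 60")          # passed through as Alchemy command-line options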