def __init__(self, mln, db):
    """Construct a ground model for `mln` over the evidence database `db`.

    :param mln: the MLN to be grounded (will be materialized wrt. `db`).
    :param db:  a Database instance, a database filename, or None for an
                empty database.
    :raises Exception: if `db` is of an unsupported type.
    """
    # BUGFIX: normalize the database argument *before* using it. The
    # original code called mln.materialize(db) and read db.domains prior
    # to this check, so the filename and None paths could never work.
    if isinstance(db, basestring):
        db = Database.load(mln, dbfile=db)
    elif isinstance(db, Database):
        pass
    elif db is None:
        db = Database(mln)
    else:
        raise Exception("Not a valid database argument (type %s)" % (str(type(db))))
    self.mln = mln.materialize(db)
    self._evidence = []
    self._variables = {}
    self._variables_by_idx = {}  # gnd atom idx -> variable
    self._variables_by_gndatomidx = {}  # gnd atom idx
    self._gndatoms = {}
    self._gndatoms_by_idx = {}
    # combined domain of the MLN and the evidence database
    self.domains = mergedom(self.mln.domains, db.domains)
    # ground members
    self.formulas = list(self.mln.formulas)
    self.db = db
    # materialize formula weights
    self._materialize_weights()
class Database:
    """Thin wrapper around a PRACMLN database that additionally keeps the
    ground atoms it was fed, in insertion order."""

    def __init__(self, mln):
        # our own ordered record of the atoms, plus the wrapped pracmln db
        self.ground_atoms = []
        self.pracmln_database = PRACMLNDatabase(mln.pracmln)

    def add_ground_atom(self, ground_atom):
        """Record `ground_atom` and forward its string form together with
        its truth value to the wrapped pracmln database."""
        self.ground_atoms.append(ground_atom)
        atom_str = str(ground_atom)
        self.pracmln_database.add(atom_str, ground_atom.truth_value)
def evalMLN(self, mln, dbs, module):
    '''
    Returns a confusion matrix for the given (learned) MLN evaluated on
    the databases given in dbs.
    '''
    log = logs.getlogger(self.fold_id)
    queryPred = self.params.queryPred
    queryDom = self.params.queryDom
    # one anonymous variable per argument of the query predicate,
    # e.g. person(?arg0,?arg1)
    sig = [
        '?arg%d' % i
        for i, _ in enumerate(self.params.altMLN.predicates[queryPred])
    ]
    querytempl = '%s(%s)' % (queryPred, ','.join(sig))
    # work on copies so the callers' databases are left untouched
    dbs = [db.duplicate() for db in dbs]
    infer = PRACInference(module.prac, [])
    inferenceStep = PRACInferenceStep(infer, self)
    for db in dbs:
        # save and remove the query predicates from the evidence
        trueDB = Database(self.params.altMLN)
        for bindings in db.query(querytempl):
            atom = querytempl
            for binding in bindings:
                atom = atom.replace(binding, bindings[binding])
            trueDB.addGroundAtom(atom)
            db.retractGndAtom(atom)
        try:
            # run inference on the evidence with the ground truth removed
            inferenceStep.output_dbs = [db]
            infer.inference_steps = [inferenceStep]
            module.prac.run(infer, module, mln=mln)
            resultDB = infer.inference_steps[-1].output_dbs[-1]
            sig2 = list(sig)
            # position of the query domain within the predicate signature
            entityIdx = mln.predicates[queryPred].index(queryDom)
            for entity in db.domains[queryDom]:
                sig2[entityIdx] = entity
                query = '%s(%s)' % (queryPred, ','.join(sig2))
                # NOTE(review): these loops deliberately keep only the
                # *last* truth/prediction value returned by query() —
                # presumably each query yields one result; confirm.
                for truth in trueDB.query(query):
                    truth = list(truth.values()).pop()
                for pred in resultDB.query(query):
                    pred = list(pred.values()).pop()
                self.confMatrix.addClassificationResult(pred, truth)
            # restore the previously retracted ground-truth evidence
            for e, v in trueDB.evidence.items():
                if v is not None:
                    db.addGroundAtom('%s%s' % ('' if v is True else '!', e))
        except:
            # log the full traceback; evaluation continues with the next db
            log.critical(''.join(
                traceback.format_exception(*sys.exc_info())))
def __init__(self, params):
    '''
    params being a XValFoldParams object.
    '''
    self.params = params
    self.fold_id = 'Fold-%d' % params.fold_idx
    self.confmat = ConfusionMatrix()
    # dump this fold's training and test databases to disk
    for prefix, fold_dbs in (('train', params.learn_dbs),
                             ('test', params.test_dbs)):
        path = os.path.join(params.directory,
                            '%s_dbs_%d.db' % (prefix, params.fold_idx))
        with open(path, 'w+') as dbfile:
            Database.write_dbs(fold_dbs, dbfile)
def resultdb(self):
    """Return the inference results as a Database over the MRF's MLN.

    If a ``_resultdb`` attribute has been set on this instance, it is
    returned as-is (the cache is read here but never written — presumably
    it is populated elsewhere; confirm against callers)."""
    try:
        return self.__dict__['_resultdb']
    except KeyError:
        pass
    db = Database(self.mrf.mln)
    for gndatom in sorted(self.results, key=str):
        db[str(gndatom)] = self.results[gndatom]
    return db
def learn(self, databases, method=BPLL, **params): ''' Triggers the learning parameter learning process for a given set of databases. Returns a new MLN object with the learned parameters. :param databases: list of :class:`mln.database.Database` objects or filenames ''' verbose = params.get('verbose', False) # get a list of database objects if not databases: raise Exception('At least one database is needed for learning.') dbs = [] for db in databases: if isinstance(db, basestring): db = Database.load(self, db) if type(db) is list: dbs.extend(db) else: dbs.append(db) elif type(db) is list: dbs.extend(db) else: dbs.append(db) logger.debug('loaded %s evidence databases for learning' % len(dbs)) newmln = self.materialize(*dbs) logger.debug('MLN predicates:') for p in newmln.predicates: logger.debug(p) logger.debug('MLN domains:') for d in newmln.domains.iteritems(): logger.debug(d) if not newmln.formulas: raise Exception('No formulas in the materialized MLN.') logger.debug('MLN formulas:') for f in newmln.formulas: logger.debug('%s %s' % (str(f.weight).ljust(10, ' '), f)) # run learner if len(dbs) == 1: mrf = newmln.ground(dbs[0]) logger.debug('Loading %s-Learner' % method.__name__) learner = method(mrf, **params) else: learner = MultipleDatabaseLearner(newmln, dbs, method, **params) if verbose: "learner: %s" % learner.name wt = learner.run(**params) newmln.weights = wt # fit prior prob. 
constraints if any available if len(self.probreqs) > 0: fittingParams = { "fittingMethod": self.probabilityFittingInferenceMethod, "fittingSteps": self.probabilityFittingMaxSteps, "fittingThreshold": self.probabilityFittingThreshold } fittingParams.update(params) print "fitting with params ", fittingParams self._fitProbabilityConstraints(self.probreqs, **fittingParams) if params.get('ignore_zero_weight_formulas', False): formulas = list(newmln.formulas) weights = list(newmln.weights) fix = list(newmln.fixweights) newmln._rmformulas() for f, w,fi in zip(formulas, weights, fix): if w != 0: newmln.formula(f, w, fi) return newmln
def __init__(self, params):
    '''
    params being a XValFoldParams object.
    '''
    self.params = params
    self.fold_id = 'Fold-%d' % params.foldIdx
    self.confMatrix = ConfusionMatrixSim()
    # Write the training and testing databases into files. Use context
    # managers instead of manual open/close so the file handles are
    # released even when writeDBs raises (and for consistency with the
    # other fold constructors in this file).
    with open(
            os.path.join(params.directory,
                         'train_dbs_%d.db' % params.foldIdx),
            'w+') as dbfile:
        Database.writeDBs(params.learnDBs, dbfile)
    with open(
            os.path.join(params.directory,
                         'test_dbs_%d.db' % params.foldIdx),
            'w+') as dbfile:
        Database.writeDBs(params.testDBs, dbfile)
def __init__(self, params):
    '''
    params being a XValFoldParams object.
    '''
    self.params = params
    self.fold_id = 'Fold-%d' % params.fold_idx
    self.confmat = ConfusionMatrix()
    # persist the fold's training and test databases
    targets = (('train_dbs_%d.db', params.learn_dbs),
               ('test_dbs_%d.db', params.test_dbs))
    for template, fold_dbs in targets:
        filename = template % params.fold_idx
        with open(os.path.join(params.directory, filename), 'w+') as dbfile:
            Database.write_dbs(fold_dbs, dbfile)
def __get_db(self, request, config, mln):
    """Resolve the single evidence database for a request.

    Exactly one source must be provided: either inline evidence on the
    query, or a database file named in the config.

    :param request: the incoming request; ``request.query.evidence`` is a
                    list of evidence lines (may be empty).
    :param config:  configuration object; ``config.db`` is a db filename
                    or the empty string.
    :param mln:     the MLN the database is parsed/loaded against.
    :returns: the single resulting database.
    :raises Exception: if no evidence, duplicate evidence, or the source
                       yields more than one database.
    """
    if not request.query.evidence and config.db == "":
        raise Exception("No evidence provided!")
    if request.query.evidence and config.db != "":
        raise Exception(
            "Duplicate evidence; provide either a db in the config or an evidence db in the query"
        )
    if request.query.evidence:
        # join the evidence lines into one db document; replaces the old
        # reduce(lambda x, y: x + "\n" + y, ...) with the idiomatic (and
        # always-in-scope) str.join, which is equivalent for the
        # non-empty list guaranteed by the checks above.
        to_return = parse_db(mln, "\n".join(request.query.evidence))
    else:
        to_return = Database.load(mln, config.db)
    if len(to_return) != 1:
        raise Exception("Only one db is supported!")
    return to_return[0]
# NOTE(review): `mln` and `f` are defined earlier in this script (outside
# this excerpt) — presumably an MLN instance and a formula source string;
# confirm against the full file.
f = mln.logic.grammar.parse_formula(f)
mln.write()
# print f, '==================================================================================='
# f.print_structure()
# print 'repr of f', repr(f)
# print 'list f.literals', list(f.literals())
# print 'parse_formula', mln.logic.parse_formula('bar(x)') in f.literals()
# print 'f', f
# convert the parsed formula to conjunctive normal form
cnf = f.cnf()
# print 'structure:'
cnf.print_structure()
# print 'cnf:',cnf
# attach the CNF formula to the MLN and materialize it against an
# empty database (no evidence)
mln.formula(cnf)
db = Database(mln)
matmln = mln.materialize(db)
matmln.write()
# test = ['!a(k)',
#         'a(c) ^ b(g)',
#         'b(x) v !a(l) ^ b(x)',
#         '!(a(g)) => ((!(f(x) v b(a))))',
#         "f(h) v (g(?h) <=> !f(?k) ^ d(e))",
#         'f(t) ^ ?x = y'
#         ]
# for t in test:
#     print t
#     mln.logic.grammar.tree.reset()
#     mln.logic.grammar.parse_formula(t).print_structure()
#     print t
from pracmln.mln.base import MLN
from pracmln.mln.database import Database
from pracmln.mln.inference.exact import EnumerationAsk
from pracmln.mln.inference.gibbs import GibbsSampler
from pracmln.mln.inference.mcsat import MCSAT
from pracmln.mln.constants import ALL

# Build the model, load the evidence and ground the network.
mln = MLN(logic='FirstOrderLogic',
          grammar='StandardGrammar',
          mlnfile='onenote.mln')
db = Database(mln, dbfile='onenote-infer.db')
mrf = mln.ground(db)

# Alternative inference methods, kept for experimentation:
# method = EnumerationAsk(mrf, queries=ALL)
# method = GibbsSampler(mrf, chains=1, maxsteps=50, sample=True)
method = MCSAT(mrf, chains=10, maxsteps=500, sample=True)

# Run MC-SAT inference and print the results.
result = method.run()
result.write()
def infer(self, savegeometry=True, options=None, *args):
    """Run an MLN query with the current GUI settings.

    Builds MLN/EMLN/db objects from the editors (or from command-line
    ``options`` overrides), runs :class:`MLNQuery`, and writes the result
    either to the output file given in ``options`` or into the project.

    :param savegeometry: whether to persist the window geometry.
    :param options: optional dict of command-line overrides
        (mlnarg/emlnarg/dbarg/queryarg/outputfile). Replaces the old
        mutable default ``{}`` with a None sentinel, consistent with the
        learn tool.
    """
    if options is None:
        options = {}
    mln_content = self.mln_container.editor.get("1.0", END).strip()
    db_content = self.db_container.editor.get("1.0", END).strip()
    # create conf from current gui settings
    self.update_config()
    # write gui settings
    self.write_gconfig(savegeometry=savegeometry)
    # hide gui
    self.master.withdraw()
    try:
        print((headline('PRACMLN QUERY TOOL')))
        print()
        if options.get('mlnarg') is not None:
            mlnobj = MLN(mlnfile=os.path.abspath(options.get('mlnarg')),
                         logic=self.config.get('logic', 'FirstOrderLogic'),
                         grammar=self.config.get('grammar', 'PRACGrammar'))
        else:
            mlnobj = parse_mln(
                mln_content, searchpaths=[self.dir],
                projectpath=os.path.join(self.dir, self.project.name),
                logic=self.config.get('logic', 'FirstOrderLogic'),
                grammar=self.config.get('grammar', 'PRACGrammar'))
        if options.get('emlnarg') is not None:
            emln_content = mlnpath(options.get('emlnarg')).content
        else:
            emln_content = self.emln_container.editor.get("1.0", END).strip()
        if options.get('dbarg') is not None:
            dbobj = Database.load(mlnobj, dbfiles=[options.get('dbarg')],
                                  ignore_unknown_preds=self.config.get(
                                      'ignore_unknown_preds', True))
        else:
            out(self.config.get('ignore_unknown_preds', True))
            dbobj = parse_db(mlnobj, db_content,
                             ignore_unknown_preds=self.config.get(
                                 'ignore_unknown_preds', True))
        if options.get('queryarg') is not None:
            self.config["queries"] = options.get('queryarg')
        infer = MLNQuery(config=self.config, mln=mlnobj, db=dbobj,
                         emln=emln_content)
        result = infer.run()
        # write to file if run from commandline, otherwise save result to project results
        if options.get('outputfile') is not None:
            output = io.StringIO()
            result.write(output)
            with open(os.path.abspath(options.get('outputfile')), 'w') as f:
                f.write(output.getvalue())
            logger.info('saved result to {}'.format(
                os.path.abspath(options.get('outputfile'))))
        elif self.save.get():
            output = io.StringIO()
            result.write(output)
            fname = self.output_filename.get()
            self.project.add_result(fname, output.getvalue())
            self.project.save(dirpath=self.dir)
            logger.info(
                'saved result to file results/{} in project {}'.format(
                    fname, self.project.name))
        else:
            logger.debug(
                'No output file given - results have not been saved.')
    except:
        # GUI boundary: report the error but always restore the window
        traceback.print_exc()
    # restore main window
    sys.stdout.flush()
    self.master.deiconify()
def run(self):
    # Run inference with the configured method and return its result.
    watch = StopWatch()
    watch.tag('inference', self.verbose)
    # load the MLN
    if isinstance(self.mln, MLN):
        mln = self.mln
    else:
        raise Exception('No MLN specified')
    # if an EMLN is in use, merge its text with the MLN source and re-parse
    if self.use_emln and self.emln is not None:
        mlnstrio = io.StringIO()
        mln.write(mlnstrio)
        mlnstr = mlnstrio.getvalue()
        mlnstrio.close()
        emln = self.emln
        mln = parse_mln(mlnstr + emln, grammar=self.grammar,
                        logic=self.logic)
    # load the database (exactly one is supported; empty list -> empty db)
    if isinstance(self.db, Database):
        db = self.db
    elif isinstance(self.db, list) and len(self.db) == 1:
        db = self.db[0]
    elif isinstance(self.db, list) and len(self.db) == 0:
        db = Database(mln)
    elif isinstance(self.db, list):
        raise Exception(
            'Got {} dbs. Can only handle one for inference.'.format(
                len(self.db)))
    else:
        raise Exception('DB of invalid format {}'.format(type(self.db)))
    # expand the parameters
    params = dict(self._config)
    if 'params' in params:
        # NOTE(review): eval() on a config-supplied string — only safe
        # with trusted configurations.
        params.update(eval("dict(%s)" % params['params']))
        del params['params']
    params['verbose'] = self.verbose
    if self.verbose:
        print((tabulate(sorted(list(params.items()),
                               key=lambda k_v: str(k_v[0])),
                        headers=('Parameter:', 'Value:'))))
    if type(db) is list and len(db) > 1:
        raise Exception('Inference can only handle one database at a time')
    elif type(db) is list:
        db = db[0]
    # keep only non-empty closed-world predicate names
    params['cw_preds'] = [x for x in self.cw_preds if bool(x)]
    # extract and remove all non-algorithm (GUI-only) parameters
    for s in GUI_SETTINGS:
        if s in params:
            del params[s]
    if self.profile:
        prof = Profile()
        print('starting profiler...')
        prof.enable()
    # set the debug level
    olddebug = logger.level
    logger.level = (eval('logs.%s' % params.get('debug',
                                                'WARNING').upper()))
    result = None
    try:
        # materialize, ground, and run the configured inference method
        mln_ = mln.materialize(db)
        mrf = mln_.ground(db)
        inference = self.method(mrf, self.queries, **params)
        if self.verbose:
            print()
            print((headline('EVIDENCE VARIABLES')))
            print()
            mrf.print_evidence_vars()
        result = inference.run()
        if self.verbose:
            print()
            print((headline('INFERENCE RESULTS')))
            print()
            inference.write()
        if self.verbose:
            print()
            inference.write_elapsed_time()
    except SystemExit:
        traceback.print_exc()
        print('Cancelled...')
    finally:
        if self.profile:
            prof.disable()
            print((headline('PROFILER STATISTICS')))
            ps = pstats.Stats(prof,
                              stream=sys.stdout).sort_stats('cumulative')
            ps.print_stats()
        # reset the debug level
        logger.level = olddebug
    if self.verbose:
        print()
    watch.finish()
    watch.printSteps()
    return result
def infer(self, savegeometry=True, options={}, *args): mln_content = self.mln_container.editor.get("1.0", END).encode('utf8').strip() db_content = self.db_container.editor.get("1.0", END).encode('utf8').strip() # create conf from current gui settings self.update_config() # write gui settings self.write_gconfig(savegeometry=savegeometry) # hide gui self.master.withdraw() try: print headline('PRACMLN QUERY TOOL') print if options.get('mlnarg') is not None: mlnobj = MLN(mlnfile=os.path.abspath(options.get('mlnarg')), logic=self.config.get('logic', 'FirstOrderLogic'), grammar=self.config.get('grammar', 'PRACGrammar')) else: mlnobj = parse_mln(mln_content, searchpaths=[self.dir], projectpath=os.path.join(self.dir, self.project.name), logic=self.config.get('logic', 'FirstOrderLogic'), grammar=self.config.get('grammar', 'PRACGrammar')) if options.get('emlnarg') is not None: emln_content = mlnpath(options.get('emlnarg')).content else: emln_content = self.emln_container.editor.get("1.0", END).encode('utf8').strip() if options.get('dbarg') is not None: dbobj = Database.load(mlnobj, dbfiles=[options.get('dbarg')], ignore_unknown_preds=self.config.get('ignore_unknown_preds', True)) else: out(self.config.get('ignore_unknown_preds', True)) dbobj = parse_db(mlnobj, db_content, ignore_unknown_preds=self.config.get('ignore_unknown_preds', True)) if options.get('queryarg') is not None: self.config["queries"] = options.get('queryarg') infer = MLNQuery(config=self.config, mln=mlnobj, db=dbobj, emln=emln_content) result = infer.run() # write to file if run from commandline, otherwise save result to project results if options.get('outputfile') is not None: output = StringIO.StringIO() result.write(output) with open(os.path.abspath(options.get('outputfile')), 'w') as f: f.write(output.getvalue()) logger.info('saved result to {}'.format(os.path.abspath(options.get('outputfile')))) elif self.save.get(): output = StringIO.StringIO() result.write(output) fname = self.output_filename.get() 
self.project.add_result(fname, output.getvalue()) self.project.save(dirpath=self.dir) logger.info('saved result to file results/{} in project {}'.format(fname, self.project.name)) else: logger.debug('No output file given - results have not been saved.') except: traceback.print_exc() # restore main window sys.stdout.flush() self.master.deiconify()
def run(self):
    '''
    Run the MLN learning with the given parameters.
    '''
    # load the MLN
    if isinstance(self.mln, MLN):
        mln = self.mln
    else:
        raise Exception('No MLN specified')
    # load the training databases: a list of Database objects, a single
    # Database, or a filename relative to <directory>/db
    if type(self.db) is list and all(
            map(lambda e: isinstance(e, Database), self.db)):
        dbs = self.db
    elif isinstance(self.db, Database):
        dbs = [self.db]
    elif isinstance(self.db, basestring):
        db = self.db
        if db is None or not db:
            raise Exception('no trainig data given!')
        dbpaths = [os.path.join(self.directory, 'db', db)]
        dbs = []
        for p in dbpaths:
            dbs.extend(Database.load(mln, p, self.ignore_unknown_preds))
    else:
        raise Exception(
            'Unexpected type of training databases: %s' % type(self.db))
    if self.verbose:
        print 'loaded %d database(s).' % len(dbs)
    watch = StopWatch()
    if self.verbose:
        # pretty-print the effective configuration
        confg = dict(self._config)
        # NOTE(review): eval() on the params string — trusted input only
        confg.update(eval("dict(%s)" % self.params))
        if type(confg.get('db', None)) is list:
            confg['db'] = '%d Databases' % len(confg['db'])
        print tabulate(
            sorted(list(confg.viewitems()), key=lambda (key, v): str(key)),
            headers=('Parameter:', 'Value:'))
    # collect the learner parameters from the instance attributes
    params = dict([(k, getattr(self, k)) for k in (
        'multicore', 'verbose', 'profile', 'ignore_zero_weight_formulas')])
    # for discriminative learning
    if issubclass(self.method, DiscriminativeLearner):
        if self.discr_preds == QUERY_PREDS:  # use query preds
            params['qpreds'] = self.qpreds
        elif self.discr_preds == EVIDENCE_PREDS:  # use evidence preds
            params['epreds'] = self.epreds
    # gaussian prior settings
    if self.use_prior:
        params['prior_mean'] = self.prior_mean
        params['prior_stdev'] = self.prior_stdev
    # expand the parameters
    params.update(self.params)
    if self.profile:
        prof = Profile()
        print 'starting profiler...'
        prof.enable()
    else:
        prof = None
    # set the debug level
    olddebug = praclog.level()
    praclog.level(
        eval('logging.%s' % params.get('debug', 'WARNING').upper()))
    mlnlearnt = None
    try:
        # run the learner
        mlnlearnt = mln.learn(dbs, self.method, **params)
        if self.verbose:
            print
            print headline('LEARNT MARKOV LOGIC NETWORK')
            print
            mlnlearnt.write()
    except SystemExit:
        print 'Cancelled...'
    finally:
        if self.profile:
            prof.disable()
            print headline('PROFILER STATISTICS')
            ps = pstats.Stats(prof, stream=sys.stdout).sort_stats(
                'cumulative')
            ps.print_stats()
    # reset the debug level
    praclog.level(olddebug)
    print
    watch.finish()
    watch.printSteps()
    return mlnlearnt
def learn(self, savegeometry=True, options=None, *_):
    # Run MLN learning with the current GUI settings, optionally
    # overridden by command-line `options`, and save the learnt MLN.
    if options is None:
        options = {}
    mln_content = self.mln_container.editor.get("1.0", END).strip()
    db_content = self.db_container.editor.get("1.0", END).strip()
    # create conf from current gui settings
    self.update_config()
    # write gui settings
    self.write_gconfig(savegeometry=savegeometry)
    # hide gui
    self.master.withdraw()
    try:
        print((headline('PRAC LEARNING TOOL')))
        print()
        if options.get('mlnarg') is not None:
            mlnobj = MLN(mlnfile=os.path.abspath(options.get('mlnarg')),
                         logic=self.config.get('logic', 'FirstOrderLogic'),
                         grammar=self.config.get('grammar', 'PRACGrammar'))
        else:
            mlnobj = parse_mln(mln_content, searchpaths=[self.project_dir],
                               projectpath=os.path.join(self.project_dir, self.project.name),
                               logic=self.config.get('logic', 'FirstOrderLogic'),
                               grammar=self.config.get('grammar', 'PRACGrammar'))
        if options.get('dbarg') is not None:
            dbobj = Database.load(mlnobj, dbfiles=[options.get('dbarg')],
                                  ignore_unknown_preds=self.config.get('ignore_unknown_preds', True))
        else:
            if self.config.get('pattern'):
                # a filename pattern selects multiple training dbs
                local, dblist = self.get_training_db_paths(self.config.get('pattern').strip())
                dbobj = []
                # build database list from project dbs
                # NOTE(review): these branches use self.dir while the
                # parse_mln call above uses self.project_dir — confirm
                # whether both attributes are meant to exist here.
                if local:
                    for dbname in dblist:
                        dbobj.extend(parse_db(mlnobj, self.project.dbs[dbname].strip(),
                                              ignore_unknown_preds=self.config.get('ignore_unknown_preds', True),
                                              projectpath=os.path.join(self.dir, self.project.name)))
                    out(dbobj)
                # build database list from filesystem dbs
                else:
                    for dbpath in dblist:
                        dbobj.extend(Database.load(mlnobj, dbpath,
                                                   ignore_unknown_preds=
                                                   self.config.get('ignore_unknown_preds', True)))
            # build single db from currently selected db
            else:
                dbobj = parse_db(mlnobj, db_content,
                                 projectpath=os.path.join(self.dir, self.project.name),
                                 dirs=[self.dir])
        learning = MLNLearn(config=self.config, mln=mlnobj, db=dbobj)
        result = learning.run()
        # write to file if run from commandline, otherwise save result
        # to project results
        if options.get('outputfile') is not None:
            output = io.StringIO()
            result.write(output)
            with open(os.path.abspath(options.get('outputfile')), 'w') as f:
                f.write(output.getvalue())
            logger.info('saved result to {}'.format(os.path.abspath(options.get('outputfile'))))
        elif self.save.get():
            output = io.StringIO()
            result.write(output)
            self.project.add_mln(self.output_filename.get(), output.getvalue())
            self.mln_container.update_file_choices()
            self.project.save(dirpath=self.project_dir)
            logger.info('saved result to file mln/{} in project {}'.format(self.output_filename.get(), self.project.name))
        else:
            logger.debug("No output file given - results have not been saved.")
    except:
        # GUI boundary: report the error but always restore the window
        traceback.print_exc()
    # restore gui
    sys.stdout.flush()
    self.master.deiconify()
def __init__(self, mln):
    # Start with no recorded ground atoms and wrap the pracmln-level
    # database built from the given MLN.
    self.ground_atoms = []
    self.pracmln_database = PRACMLNDatabase(mln.pracmln)
from pracmln.mln.base import MLN
from pracmln.mln.database import Database
from pracmln.mln.learning.bpll import BPLL

# Build the model and load the training evidence.
mln = MLN(logic='FirstOrderLogic',
          grammar='StandardGrammar',
          mlnfile='onenote.mln')
# mln.fixweights = [True, False]
db = Database(mln, dbfile='onenote-train.db')
mrf = mln.ground(db)

# Learn weights via BPLL and print them, one per formula.
method = BPLL(mrf, prior_stdev=1, verbose=True)
result = method.run()
for w in result:
    print('{:.3f}'.format(w))
def resultdb(self):
    """Materialize the inference results into a fresh Database over the
    MRF's MLN, with atoms inserted in deterministic (string-sorted) order."""
    db = Database(self.mrf.mln)
    for gndatom in sorted(self.results, key=str):
        db[str(gndatom)] = self.results[gndatom]
    return db
def run(self):
    '''
    Run the MLN learning with the given parameters.
    '''
    # load the MLN
    if isinstance(self.mln, MLN):
        mln = self.mln
    else:
        raise Exception('No MLN specified')
    # load the training databases: a list of Database objects, a single
    # Database, or a filename relative to <directory>/db
    if type(self.db) is list and all(
            [isinstance(e, Database) for e in self.db]):
        dbs = self.db
    elif isinstance(self.db, Database):
        dbs = [self.db]
    elif isinstance(self.db, str):
        db = self.db
        if db is None or not db:
            raise Exception('no trainig data given!')
        dbpaths = [os.path.join(self.directory, 'db', db)]
        dbs = []
        for p in dbpaths:
            dbs.extend(Database.load(mln, p, self.ignore_unknown_preds))
    else:
        raise Exception(
            'Unexpected type of training databases: %s' % type(self.db))
    if self.verbose:
        print(('loaded %d database(s).' % len(dbs)))
    watch = StopWatch()
    if self.verbose:
        # pretty-print the effective configuration
        confg = dict(self._config)
        # NOTE(review): eval() on the params string — trusted input only
        confg.update(eval("dict(%s)" % self.params))
        if type(confg.get('db', None)) is list:
            confg['db'] = '%d Databases' % len(confg['db'])
        print((tabulate(
            sorted(list(confg.items()), key=lambda key_v: str(key_v[0])),
            headers=('Parameter:', 'Value:'))))
    # collect the learner parameters from the instance attributes
    params = dict([(k, getattr(self, k)) for k in (
        'multicore', 'verbose', 'profile', 'ignore_zero_weight_formulas')])
    # for discriminative learning
    if issubclass(self.method, DiscriminativeLearner):
        if self.discr_preds == QUERY_PREDS:  # use query preds
            params['qpreds'] = self.qpreds
        elif self.discr_preds == EVIDENCE_PREDS:  # use evidence preds
            params['epreds'] = self.epreds
    # gaussian prior settings
    if self.use_prior:
        params['prior_mean'] = self.prior_mean
        params['prior_stdev'] = self.prior_stdev
    # expand the parameters
    params.update(self.params)
    if self.profile:
        prof = Profile()
        print('starting profiler...')
        prof.enable()
    else:
        prof = None
    # set the debug level
    olddebug = logger.level
    logger.level = eval('logs.%s' % params.get('debug',
                                               'WARNING').upper())
    mlnlearnt = None
    try:
        # run the learner
        mlnlearnt = mln.learn(dbs, self.method, **params)
        if self.verbose:
            print()
            print(headline('LEARNT MARKOV LOGIC NETWORK'))
            print()
            mlnlearnt.write()
    except SystemExit:
        print('Cancelled...')
    finally:
        if self.profile:
            prof.disable()
            print(headline('PROFILER STATISTICS'))
            ps = pstats.Stats(prof, stream=sys.stdout).sort_stats(
                'cumulative')
            ps.print_stats()
    # reset the debug level
    logger.level = olddebug
    print()
    watch.finish()
    watch.printSteps()
    return mlnlearnt
def learn(self, savegeometry=True, options={}, *args): mln_content = self.mln_container.editor.get("1.0", END).encode('utf8').strip() db_content = self.db_container.editor.get("1.0", END).encode('utf8').strip() # create conf from current gui settings self.update_config() # write gui settings self.write_gconfig(savegeometry=savegeometry) # hide gui self.master.withdraw() try: print headline('PRAC LEARNING TOOL') print if options.get('mlnarg') is not None: mlnobj = MLN(mlnfile=os.path.abspath(options.get('mlnarg')), logic=self.config.get('logic', 'FirstOrderLogic'), grammar=self.config.get('grammar', 'PRACGrammar')) else: mlnobj = parse_mln(mln_content, searchpaths=[self.project_dir], projectpath=os.path.join(self.project_dir, self.project.name), logic=self.config.get('logic', 'FirstOrderLogic'), grammar=self.config.get('grammar', 'PRACGrammar')) if options.get('dbarg') is not None: dbobj = Database.load(mlnobj, dbfiles=[options.get('dbarg')], ignore_unknown_preds=self.config.get('ignore_unknown_preds', True)) else: if self.config.get('pattern'): local, dblist = self.get_training_db_paths(self.config.get('pattern').strip()) dbobj = [] # build database list from project dbs if local: for dbname in dblist: dbobj.extend(parse_db(mlnobj, self.project.dbs[dbname].strip(), ignore_unknown_preds=self.config.get('ignore_unknown_preds', True), projectpath=os.path.join(self.dir, self.project.name))) out(dbobj) # build database list from filesystem dbs else: for dbpath in dblist: dbobj.extend(Database.load(mlnobj, dbpath, ignore_unknown_preds= self.config.get('ignore_unknown_preds', True))) # build single db from currently selected db else: dbobj = parse_db(mlnobj, db_content, projectpath=os.path.join(self.dir, self.project.name), dirs=[self.dir]) learning = MLNLearn(config=self.config, mln=mlnobj, db=dbobj) result = learning.run() # write to file if run from commandline, otherwise save result # to project results if options.get('outputfile') is not None: output = 
StringIO.StringIO() result.write(output) with open(os.path.abspath(options.get('outputfile')), 'w') as f: f.write(output.getvalue()) logger.info('saved result to {}'.format(os.path.abspath(options.get('outputfile')))) elif self.save.get(): output = StringIO.StringIO() result.write(output) self.project.add_mln(self.output_filename.get(), output.getvalue()) self.mln_container.update_file_choices() self.project.save(dirpath=self.project_dir) logger.info('saved result to file mln/{} in project {}'.format(self.output_filename.get(), self.project.name)) else: logger.debug("No output file given - results have not been saved.") except: traceback.print_exc() # restore gui sys.stdout.flush() self.master.deiconify()