def load(mln, dbfiles, ignore_unknown_preds=False, db=None):
    '''
    Reads one or multiple database files containing literals and/or domains.

    Returns one or multiple databases where domains is a dictionary mapping
    domain names to lists of constants defined in the database and evidence
    is a dictionary mapping ground atom strings to truth values.

    :param mln:                  the MLN object which should be used to load the database.
    :param dbfiles:              a single path or a list of paths to database files.
    :param ignore_unknown_preds: if True, predicates in the DB that are not declared
                                 in the MLN are silently skipped instead of raising.
    :param db:                   optional Database object that receives the parsed facts;
                                 only valid when the files yield a single database.
    :returns:                    a list of database objects.

    :Example:

    >>> mln = MLN()
    >>> db = Database.load(mln, './example.db')
    '''
    if not isinstance(dbfiles, list):
        dbfiles = [dbfiles]
    # Search paths for resolving #include directives: the directories of all
    # given files. Hoisted out of the loop -- it does not depend on the loop
    # variable and was needlessly recomputed per file before.
    dirs = [os.path.dirname(fp) for fp in dbfiles]
    dbs = []
    for dbpath in dbfiles:
        if isinstance(dbpath, basestring):
            dbpath = mlnpath(dbpath)
        if isinstance(dbpath, mlnpath):
            # remember the project location so project-relative #include
            # statements can be resolved by the parser
            projectpath = None
            if dbpath.project is not None:
                projectpath = dbpath.projectloc
            dbs_ = parse_db(mln, content=dbpath.content,
                            ignore_unknown_preds=ignore_unknown_preds,
                            db=db, dirs=dirs, projectpath=projectpath)
            dbs.extend(dbs_)
        else:
            raise Exception('Illegal db file specifier: %s' % dbpath)
    if len(dbs) > 1 and db is not None:
        raise Exception('Cannot attach multiple databases to a single '
                        'database object. Use Database.load(..., db=None).')
    return dbs
def load(mln, dbfiles, ignore_unknown_preds=False, db=None):
    '''
    Reads one or multiple database files containing literals and/or domains.

    Returns one or multiple databases where domains is a dictionary mapping
    domain names to lists of constants defined in the database and evidence
    is a dictionary mapping ground atom strings to truth values.

    :param dbfile: a single one or a list of paths to database file.
    :param mln: the MLN object which should be used to load the database.
    :returns: either one single or a list of database objects.

    :Example:

    >>> mln = MLN()
    >>> db = Database.load(mln, './example.db')
    '''
    if type(dbfiles) is not list:
        dbfiles = [dbfiles]
    databases = []
    for specifier in dbfiles:
        if isinstance(specifier, basestring):
            specifier = mlnpath(specifier)
        # only mlnpath specifiers (possibly converted from strings) are legal
        if not isinstance(specifier, mlnpath):
            raise Exception('Illegal db file specifier: %s' % specifier)
        projectpath = specifier.projectloc if specifier.project is not None else None
        searchdirs = [os.path.dirname(fp) for fp in dbfiles]
        parsed = parse_db(mln, content=specifier.content,
                          ignore_unknown_preds=ignore_unknown_preds,
                          db=db, dirs=searchdirs, projectpath=projectpath)
        databases.extend(parsed)
    if len(databases) > 1 and db is not None:
        raise Exception('Cannot attach multiple databases to a single database object. Use Database.load(..., db=None).')
    return databases
def load(files, logic='FirstOrderLogic', grammar='PRACGrammar', mln=None):
    '''
    Reads an MLN object from a file or a set of files.

    :param files:   one or more :class:`pracmln.mlnpath` objects or path strings.
                    If multiple file names are given, the contents of all files
                    will be concatenated.
    :param logic:   (string) the type of logic to be used. Either
                    `FirstOrderLogic` or `FuzzyLogic`.
    :param grammar: (string) the syntax to be used for parsing the MLN file.
                    Either `PRACGrammar` or `StandardGrammar`.
    :param mln:     optional MLN object the parsed content is merged into.
    :raises Exception: if `files` is None or contains an unsupported entry.
    '''
    if files is None:
        raise Exception('No mln files given.')
    if not type(files) is list:
        files = [files]
    projectpath = None
    chunks = []  # collected file contents; joined once to avoid quadratic +=
    dirs = []    # search paths for resolving #include directives
    for f in files:
        if isinstance(f, basestring):
            p = mlnpath(f)
            if p.project is not None:
                projectpath = p.projectloc
            chunks.append(p.content)
            dirs.append(os.path.dirname(f))
        elif isinstance(f, mlnpath):
            chunks.append(f.content)
            # os.path.dirname() cannot handle mlnpath objects (the previous
            # code crashed here); use the same path resolution the #include
            # machinery uses for mlnpath instances.
            dirs.append(f.resolve_path())
        else:
            raise Exception('Unexpected file specification: %s' % str(f))
    return parse_mln(''.join(chunks), searchpaths=dirs, projectpath=projectpath,
                     logic=logic, grammar=grammar, mln=mln)
def load(files, logic='FirstOrderLogic', grammar='PRACGrammar', mln=None):
    '''
    Reads an MLN object from a file or a set of files.

    :param files: one or more :class:`pracmln.mlnpath` strings. If multiple
                  file names are given, the contents of all files will be
                  concatenated.
    :param logic: (string) the type of logic to be used. Either `FirstOrderLogic`
                  or `FuzzyLogic`.
    :param grammar: (string) the syntax to be used for parsing the MLN file.
                  Either `PRACGrammar` or `StandardGrammar`.
    '''
    # read MLN file
    text = ''
    if files is not None:
        if not type(files) is list:
            files = [files]
        projectpath = None
        for entry in files:
            if isinstance(entry, mlnpath):
                text += entry.content
                continue
            if not isinstance(entry, basestring):
                raise Exception('Unexpected file specification: %s' % str(entry))
            located = mlnpath(entry)
            if located.project is not None:
                projectpath = located.projectloc
            text += located.content
        dirs = [os.path.dirname(fn) for fn in files]
        return parse_mln(text, searchpaths=dirs, projectpath=projectpath,
                         logic=logic, grammar=grammar, mln=mln)
    raise Exception('No mln files given.')
def import_file(self):
    '''Prompt the user for a file to import and hand its content to the import hook.

    Updates the current working directory, the file-choice widget and the
    selected-file variable, and marks the container dirty.
    '''
    chosen = askopenfilename(initialdir=self.dir,
                             filetypes=self.fsettings.get('ftypes'),
                             defaultextension=self.fsettings.get('extension', '.mln'))
    if not chosen:
        # dialog was cancelled
        return
    directory, basename = ntpath.split(chosen)
    self.dir = os.path.abspath(directory)
    data = mlnpath(chosen).content
    if self.import_hook is not None:
        self.import_hook(basename, data)
    self.update_file_choices()
    self.selected_file.set(basename)
    self.dirty = True
def import_file(self):
    '''Let the user pick a file via a dialog and register it with this container.'''
    filename = askopenfilename(
        initialdir=self.dir,
        filetypes=self.fsettings.get('ftypes'),
        defaultextension=self.fsettings.get('extension', '.mln'))
    if filename:
        head, tail = ntpath.split(filename)
        # remember the directory for the next dialog invocation
        self.dir = os.path.abspath(head)
        imported = mlnpath(filename).content
        hook = self.import_hook
        if hook is not None:
            hook(tail, imported)
        self.update_file_choices()
        self.selected_file.set(tail)
        self.dirty = True
def parse_db(mln, content, ignore_unknown_preds=False, db=None, dirs=['.'], projectpath=None):
    '''
    Reads one or more databases in a string representation and returns the
    respective Database objects.

    :param mln: the MLN object which should be used to load the database.
    :param content: the string representation of one or multiple ('---'-separated) databases
    :param ignore_unknown_preds: by default this function raises an Exception when it encounters
                                 a predicate in the DB that has not been declared in the associated
                                 MLN. ignore_unknown_preds=True simply ignores such predicates.
    :param db: the Database object that shall receive the facts stored in the new DB.
               If None, a new `Database` object will be created.
    :param dirs: search directories for resolving #include statements.
                 NOTE(review): mutable default argument -- shared across calls.
    :param projectpath: location of the enclosing pracmln project, used for
                        project-relative #include statements.
    '''
    log = logging.getLogger('db')
    content = stripComments(content)
    allow_multiple = True
    if db is None:
        allow_multiple = True
        db = Database(mln, ignore_unknown_preds=ignore_unknown_preds)
    dbs = []
    # expand domains with dbtext constants and save evidence
    for line, l in enumerate(content.split("\n")):
        l = l.strip()
        if l == '':
            continue
        # separator between independent databases
        elif l == '---' and not db.isempty():
            dbs.append(db)
            db = Database(mln)
            continue
        # domain declaration, e.g. "dom = {A, B, C}"
        elif "{" in l:
            domname, constants = db.mln.logic.parse_domain(l)
            # domnames is presumably consumed further down in the full source;
            # it is unused in this visible excerpt -- TODO confirm
            domnames = [domname for _ in constants]
        # include directive: pull in another database file
        elif l.startswith('#include'):
            filename = l[len("#include "):].strip()
            m = re.match(r'"(?P<filename>.+)"', filename)
            if m is not None:
                filename = m.group('filename')
                # if the path is relative, look for the respective file
                # relatively to all paths specified. Take the first file matching.
                if not mlnpath(filename).exists:
                    includefilename = None
                    for d in dirs:
                        mlnp = '/'.join([d, filename])
                        if mlnpath(mlnp).exists:
                            includefilename = mlnp
                            break
                    if includefilename is None:
                        raise Exception('File not found: %s' % filename)
                else:
                    includefilename = filename
            else:
                # angle-bracket form: include from the enclosing project
                m = re.match(r'<(?P<filename>.+)>', filename)
                if m is not None:
                    filename = m.group('filename')
                else:
                    raise MLNParsingError('Malformed #include statement: %s' % line)
                if projectpath is None:
                    raise MLNParsingError('No project specified: Cannot locate import from project: %s' % filename)
                includefilename = ':'.join([projectpath, filename])
            logger.debug('Including file: "%s"' % includefilename)
            p = mlnpath(includefilename)
            # recurse with the include's own directory prepended to the search paths
            dbs.extend(parse_db(content=mlnpath(includefilename).content, ignore_unknown_preds=ignore_unknown_preds, dirs=[p.resolve_path()]+dirs, projectpath=ifNone(p.project, projectpath, lambda x: '/'.join(p.path+[x])), mln=mln))
            continue
        # valued (soft) evidence, e.g. "0.6 foo(X)"
        elif l[0] in "0123456789":
            s = l.find(" ")
            gndatom = l[s + 1:].replace(" ", "")
            value = float(l[:s])
            if value < 0 or value > 1:
                raise Exception('Valued evidence must be in [0,1]')
            if gndatom in db.evidence:
                raise Exception("Duplicate soft evidence for '%s'" % gndatom)
            try:
                positive, predname, constants = mln.logic.parse_literal(gndatom) # TODO Should we allow soft evidence on non-atoms here? (This assumes atoms)
            except NoSuchPredicateError, e:
                if ignore_unknown_preds:
                    continue
                else:
                    raise e
            domnames = mln.predicate(predname).argdoms
            db << (gndatom, value)
        # plain literal (hard evidence)
        else:
            if l[0] == "?":
                raise Exception("Unknown literals not supported (%s)" % l) # this is an Alchemy feature
            try:
                true, predname, constants = mln.logic.parse_literal(l)
            except NoSuchPredicateError, e:
                if ignore_unknown_preds:
                    continue
                else:
                    raise e
            except Exception, e:
                traceback.print_exc()
                raise MLNParsingError('Error parsing line %d: %s (%s)' % (line+1, l, e.message))
    # NOTE(review): the visible excerpt of this function ends here; the tail
    # (appending the final db and returning dbs) is not part of this chunk.
def infer(self, savegeometry=True, options={}, *args):
    '''Run an MLN query from the current GUI state and persist the result.

    Builds the MLN and the evidence database either from command-line
    ``options`` (``mlnarg``/``dbarg``/``emlnarg``/``queryarg``) or from the
    editor widgets, runs :class:`MLNQuery`, and writes the result either to
    ``options['outputfile']`` or into the project's results.

    NOTE(review): ``options={}`` is a mutable default argument -- harmless
    only as long as callers never mutate it; confirm before changing.
    '''
    mln_content = self.mln_container.editor.get("1.0", END).strip()
    db_content = self.db_container.editor.get("1.0", END).strip()
    # create conf from current gui settings
    self.update_config()
    # write gui settings
    self.write_gconfig(savegeometry=savegeometry)
    # hide gui while the (potentially long) inference runs
    self.master.withdraw()
    try:
        print((headline('PRACMLN QUERY TOOL')))
        print()
        # MLN: explicit file from the command line wins over editor content
        if options.get('mlnarg') is not None:
            mlnobj = MLN(mlnfile=os.path.abspath(options.get('mlnarg')),
                         logic=self.config.get('logic', 'FirstOrderLogic'),
                         grammar=self.config.get('grammar', 'PRACGrammar'))
        else:
            mlnobj = parse_mln(
                mln_content, searchpaths=[self.dir],
                projectpath=os.path.join(self.dir, self.project.name),
                logic=self.config.get('logic', 'FirstOrderLogic'),
                grammar=self.config.get('grammar', 'PRACGrammar'))
        # EMLN content: file argument or the EMLN editor widget
        if options.get('emlnarg') is not None:
            emln_content = mlnpath(options.get('emlnarg')).content
        else:
            emln_content = self.emln_container.editor.get("1.0", END).strip()
        # evidence database: file argument or the DB editor widget
        if options.get('dbarg') is not None:
            dbobj = Database.load(mlnobj, dbfiles=[options.get('dbarg')],
                                  ignore_unknown_preds=self.config.get(
                                      'ignore_unknown_preds', True))
        else:
            out(self.config.get('ignore_unknown_preds', True))
            dbobj = parse_db(mlnobj, db_content,
                             ignore_unknown_preds=self.config.get(
                                 'ignore_unknown_preds', True))
        if options.get('queryarg') is not None:
            self.config["queries"] = options.get('queryarg')
        infer = MLNQuery(config=self.config, mln=mlnobj, db=dbobj,
                         emln=emln_content)
        result = infer.run()
        # write to file if run from commandline, otherwise save result to project results
        if options.get('outputfile') is not None:
            output = io.StringIO()
            result.write(output)
            with open(os.path.abspath(options.get('outputfile')), 'w') as f:
                f.write(output.getvalue())
            logger.info('saved result to {}'.format(
                os.path.abspath(options.get('outputfile'))))
        elif self.save.get():
            output = io.StringIO()
            result.write(output)
            fname = self.output_filename.get()
            self.project.add_result(fname, output.getvalue())
            self.project.save(dirpath=self.dir)
            logger.info(
                'saved result to file results/{} in project {}'.format(
                    fname, self.project.name))
        else:
            logger.debug(
                'No output file given - results have not been saved.')
    except:
        # NOTE(review): bare except deliberately keeps the GUI alive on any
        # inference failure; the traceback is the only diagnostic emitted.
        traceback.print_exc()
    # restore main window
    sys.stdout.flush()
    self.master.deiconify()
def parse_mln(text, searchpaths=['.'], projectpath=None, logic='FirstOrderLogic', grammar='PRACGrammar', mln=None):
    '''
    Reads an MLN from a stream providing a 'read' method.

    Parses, line by line: meta directives (#group, #fixweight, #fuzzy,
    #include, #unique, #AdaptiveMLNDependency), domain declarations, prior
    (P(...)) and posterior (R(...)/SE(...)) probability constraints, variable
    definitions ($var = ...), predicate declarations and weighted formulas.

    NOTE(review): ``searchpaths=['.']`` is a mutable default argument.
    '''
    # normalize all search paths to absolute, user/env-expanded paths
    dirs = [
        os.path.abspath(os.path.expandvars(os.path.expanduser(p)))
        for p in searchpaths
    ]
    formulatemplates = []
    text = str(text)
    if text == "":
        raise MLNParsingError(
            "No MLN content to construct model from was given; must specify either file/list of files or content string!"
        )
    # replace some meta-directives in comments
    text = re.compile(r'//\s*<group>\s*$', re.MULTILINE).sub("#group", text)
    text = re.compile(r'//\s*</group>\s*$', re.MULTILINE).sub("#group.", text)
    # remove comments
    text = stripComments(text)
    if mln is None:
        mln = MLN(logic, grammar)
    # read lines: parser state shared across lines
    mln.hard_formulas = []
    templateIdx2GroupIdx = {}
    inGroup = False
    idxGroup = -1
    fixWeightOfNextFormula = False
    fuzzy = False
    pseudofuzzy = False
    uniquevars = None
    fixedWeightTemplateIndices = []
    lines = text.split("\n")
    iLine = 0
    while iLine < len(lines):
        line = lines[iLine]
        iLine += 1
        line = line.strip()
        try:
            if len(line) == 0:
                continue
            # meta directives
            if line == "#group":
                idxGroup += 1
                inGroup = True
                continue
            elif line == "#group.":
                inGroup = False
                continue
            elif line.startswith("#fixweight"):
                # fix the weight of the NEXT formula during learning
                fixWeightOfNextFormula = True
                continue
            elif line.startswith('#fuzzy'):
                if not isinstance(mln.logic, FuzzyLogic):
                    logger.warning(
                        'Fuzzy declarations are not supported in %s. Treated as a binary predicate.'
                        % mln.logic.__class__.__name__)
                    pseudofuzzy = True
                else:
                    fuzzy = True
                continue
            elif line.startswith("#include"):
                filename = line[len("#include "):].strip()
                m = re.match(r'"(?P<filename>.+)"', filename)
                if m is not None:
                    filename = m.group('filename')
                    # if the path is relative, look for the respective file
                    # relatively to all paths specified. Take the first file matching.
                    if not mlnpath(filename).exists:
                        includefilename = None
                        for d in dirs:
                            mlnp = '/'.join([d, filename])
                            if mlnpath(mlnp).exists:
                                includefilename = mlnp
                                break
                        if includefilename is None:
                            raise Exception('File not found: %s' % filename)
                    else:
                        includefilename = filename
                else:
                    # angle-bracket form: include from the enclosing project
                    m = re.match(r'<(?P<filename>.+)>', filename)
                    if m is not None:
                        filename = m.group('filename')
                    else:
                        raise MLNParsingError(
                            'Malformed #include statement: %s' % line)
                    if projectpath is None:
                        raise MLNParsingError(
                            'No project specified: Cannot locate import from project: %s'
                            % filename)
                    includefilename = ':'.join([projectpath, filename])
                logger.debug('Including file: "%s"' % includefilename)
                p = mlnpath(includefilename)
                # recurse, merging the included content into the same mln object
                parse_mln(text=mlnpath(includefilename).content,
                          searchpaths=[p.resolve_path()] + dirs,
                          projectpath=ifnone(p.project, projectpath,
                                             lambda x: '/'.join(p.path + [x])),
                          logic=logic,
                          grammar=grammar,
                          mln=mln)
                continue
            elif line.startswith('#unique'):
                try:
                    uniVars = re.search('#unique{(.+)}', line)
                    uniVars = uniVars.groups()[0]
                    uniVars = map(str.strip, uniVars.split(','))
                    uniquevars = uniVars
                except:
                    raise MLNParsingError(
                        'Malformed #unique expression: "%s"' % line)
                continue
            elif line.startswith(
                    "#AdaptiveMLNDependency"
            ):  # declared as "#AdaptiveMLNDependency:pred:domain"; seems to be deprecated
                depPredicate, domain = line.split(":")[1:3]
                if hasattr(mln, 'AdaptiveDependencyMap'):
                    if depPredicate in mln.AdaptiveDependencyMap:
                        mln.AdaptiveDependencyMap[depPredicate].add(domain)
                    else:
                        mln.AdaptiveDependencyMap[depPredicate] = set([domain])
                else:
                    mln.AdaptiveDependencyMap = {depPredicate: set([domain])}
                continue
            # domain decl
            if '=' in line:
                # try normal domain definition
                parse = mln.logic.parse_domain(line)
                if parse is not None:
                    domName, constants = parse
                    domName = str(domName)
                    constants = map(str, constants)
                    if domName in mln.domains:
                        logger.debug(
                            "Domain redefinition: Domain '%s' is being updated with values %s."
                            % (domName, str(constants)))
                    if domName not in mln.domains:
                        mln.domains[domName] = []
                    mln.constant(domName, *constants)
                    mln.domain_decls.append(line)
                    continue
            # prior probability requirement
            if line.startswith("P("):
                m = re.match(r"P\((.*?)\)\s*=\s*([\.\de]+)", line)
                if m is None:
                    raise MLNParsingError(
                        "Prior probability constraint formatted incorrectly: %s"
                        % line)
                mln.prior(f=mln.logic.parse_formula(m.group(1)),
                          p=float(m.group(2)))
                continue
            # posterior probability requirement/soft evidence
            if line.startswith("R(") or line.startswith("SE("):
                m = re.match(r"(?:R|SE)\((.*?)\)\s*=\s*([\.\de]+)", line)
                if m is None:
                    raise MLNParsingError(
                        "Posterior probability constraint formatted incorrectly: %s"
                        % line)
                mln.posterior(f=mln.logic.parse_formula(m.group(1)),
                              p=float(m.group(2)))
                continue
            # variable definition
            if re.match(r'(\$\w+)\s*=(.+)', line):
                m = re.match(r'(\$\w+)\s*=(.+)', line)
                if m is None:
                    raise MLNParsingError("Variable assigment malformed: %s" %
                                          line)
                mln.vars[m.group(1)] = "%s" % m.group(2).strip()
                continue
            # predicate decl or formula with weight
            else:
                isHard = False
                isPredDecl = False
                if line[-1] == '.':  # hard (without explicit weight -> determine later)
                    isHard = True
                    formula = line[:-1]
                else:  # with weight
                    # try predicate declaration
                    isPredDecl = True
                    try:
                        pred = mln.logic.parse_predicate(line)
                    except Exception, e:
                        isPredDecl = False
                if isPredDecl:
                    predname = str(pred[0])
                    argdoms = map(str, pred[1])
                    softmutex = False
                    mutex = None
                    # '!' marks a functional argument, '?' a soft-functional one
                    for i, dom in enumerate(argdoms):
                        if dom[-1] in ('!', '?'):
                            if mutex is not None:
                                raise Exception(
                                    'More than one arguments are specified as (soft-)functional'
                                )
                            if fuzzy:
                                raise Exception(
                                    '(Soft-)functional predicates must not be fuzzy.'
                                )
                            mutex = i
                        if dom[-1] == '?':
                            softmutex = True
                    argdoms = map(lambda x: x.strip('!?'), argdoms)
                    pred = None
                    if mutex is not None:
                        if softmutex:
                            pred = SoftFunctionalPredicate(
                                predname, argdoms, mutex)
                        else:
                            pred = FunctionalPredicate(predname, argdoms, mutex)
                    elif fuzzy:
                        pred = FuzzyPredicate(predname, argdoms)
                        fuzzy = False
                    else:
                        pred = Predicate(predname, argdoms)
                        if pseudofuzzy:
                            mln.fuzzypreds.append(predname)
                            pseudofuzzy = False
                    mln.predicate(pred)
                    continue
                else:
                    # formula (template) with weight or terminated by '.'
                    if not isHard:
                        spacepos = line.find(' ')
                        weight = line[:spacepos]
                        formula = line[spacepos:].strip()
                    try:
                        formula = mln.logic.parse_formula(formula)
                        if isHard:
                            weight = HARD  # not set until instantiation when other weights are known
                        idxTemplate = len(formulatemplates)
                        formulatemplates.append(formula)
                        fixweight = False
                        if inGroup:
                            templateIdx2GroupIdx[idxTemplate] = idxGroup
                        if fixWeightOfNextFormula == True:
                            fixWeightOfNextFormula = False
                            fixweight = True
                            fixedWeightTemplateIndices.append(idxTemplate)
                        # expand predicate groups
                        for variant in formula.expandgrouplits():
                            mln.formula(variant,
                                        weight=weight,
                                        fixweight=fixweight,
                                        unique_templvars=uniquevars)
                        if uniquevars:
                            uniquevars = None
                    except ParseException, e:
                        raise MLNParsingError("Error parsing formula '%s'\n" %
                                              formula)
            if fuzzy and not isPredDecl:
                raise Exception(
                    '"#fuzzy" decorator not allowed at this place: %s' % line)
    # NOTE(review): the visible excerpt ends here; the handler of the
    # enclosing try and the function's tail (returning mln) are not part
    # of this chunk.
# Python 2 analysis script: reads an inference result file out of a pracmln
# project archive and starts splitting it into lines/entries for comparison
# against clean and dirty hospital CSV data.
from pracmln.utils.project import mlnpath
import pandas as pd
import numpy as np

# silence pandas' SettingWithCopyWarning for the chained assignments below
pd.options.mode.chained_assignment = None

result_file_name = "test5_query_out_2.txt"
#raw_input()
# reference (clean) table and the dirty table exported from MySQL;
# assumes both CSVs exist in the current working directory -- TODO confirm
df_clean = pd.read_csv("hospital_clean_datatable.csv")
df_dirty = pd.read_csv("holo_hosp_data_dirty_from_mysql.csv")
df_mln_clean = df_dirty.copy()

# "project:file" locator understood by mlnpath; hard-coded absolute path
path = '/home/seismic/PUD-Research-Project/examples/test_mln5.pracmln:' + result_file_name
p = mlnpath(path)
content = p.content
print content
""" #to make above code work #added code to line 489 of /home/seismic/.local/lib/python2.7/site-packages/pracmln/utils/project.py elif fileext == 'txt': res = proj.results.get(self.file) if res is None: raise Exception('Project %s does not contain results named %s' % (self.project, self.file)) return res """
# split the result dump into lines; each line holds "atom % probability"-style
# entries separated by '%' -- presumably; verify against the result format
line1 = content.split("\n")
print line1[0]
entry1 = line1[0].split("%")
def parse_db(mln, content, ignore_unknown_preds=False, db=None, dirs=None, projectpath=None):
    '''
    Reads one or more databases in a string representation and returns the
    respective Database objects.

    :param mln:                  the MLN object which should be used to load the database.
    :param content:              the string representation of one or multiple
                                 ('---'-separated) databases.
    :param ignore_unknown_preds: by default this function raises an Exception when it
                                 encounters a predicate in the DB that has not been
                                 declared in the associated MLN. ignore_unknown_preds=True
                                 simply ignores such predicates.
    :param db:                   the Database object that shall receive the facts stored
                                 in the new DB. If None, a new `Database` object will
                                 be created.
    :param dirs:                 search directories for resolving #include statements;
                                 defaults to the current directory.
    :param projectpath:          location of the enclosing pracmln project, used for
                                 project-relative #include statements.
    '''
    # fix: avoid the shared mutable default argument (was dirs=['.'])
    if dirs is None:
        dirs = ['.']
    log = logs.getlogger('db')
    content = stripComments(content)
    allow_multiple = True
    if db is None:
        allow_multiple = True
        db = Database(mln, ignore_unknown_preds=ignore_unknown_preds)
    dbs = []
    # expand domains with dbtext constants and save evidence
    for line, l in enumerate(content.split("\n")):
        l = l.strip()
        if l == '':
            continue
        # separator between independent databases
        elif l == '---' and not db.isempty():
            dbs.append(db)
            db = Database(mln)
            continue
        # domain declaration
        elif "{" in l:
            domname, constants = db.mln.logic.parse_domain(l)
            domnames = [domname for _ in constants]
        # include directive
        elif l.startswith('#include'):
            filename = l[len("#include "):].strip()
            m = re.match(r'"(?P<filename>.+)"', filename)
            if m is not None:
                filename = m.group('filename')
                # if the path is relative, look for the respective file
                # relatively to all paths specified. Take the first file matching.
                if not mlnpath(filename).exists:
                    includefilename = None
                    for d in dirs:
                        mlnp = '/'.join([d, filename])
                        if mlnpath(mlnp).exists:
                            includefilename = mlnp
                            break
                    if includefilename is None:
                        raise Exception('File not found: %s' % filename)
                else:
                    includefilename = filename
            else:
                # angle-bracket form: include from the enclosing project
                m = re.match(r'<(?P<filename>.+)>', filename)
                if m is not None:
                    filename = m.group('filename')
                else:
                    raise MLNParsingError('Malformed #include statement: %s' % line)
                if projectpath is None:
                    raise MLNParsingError('No project specified: Cannot locate import from project: %s' % filename)
                includefilename = ':'.join([projectpath, filename])
            logger.debug('Including file: "%s"' % includefilename)
            p = mlnpath(includefilename)
            # recurse with the include's own directory prepended to the search paths
            dbs.extend(parse_db(content=mlnpath(includefilename).content,
                                ignore_unknown_preds=ignore_unknown_preds,
                                dirs=[p.resolve_path()] + dirs,
                                projectpath=ifnone(p.project, projectpath, lambda x: '/'.join(p.path + [x])),
                                mln=mln))
            continue
        # valued (soft) evidence, e.g. "0.6 foo(X)"
        elif l[0] in "0123456789":
            s = l.find(" ")
            gndatom = l[s + 1:].replace(" ", "")
            value = float(l[:s])
            if value < 0 or value > 1:
                raise Exception('Valued evidence must be in [0,1]')
            if gndatom in db.evidence:
                raise Exception("Duplicate soft evidence for '%s'" % gndatom)
            try:
                _, predname, constants = mln.logic.parse_literal(gndatom)  # TODO Should we allow soft evidence on non-atoms here? (This assumes atoms)
            # fix: 'except E, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.x
            except NoSuchPredicateError as e:
                if ignore_unknown_preds:
                    continue
                else:
                    raise e
            domnames = mln.predicate(predname).argdoms
            db << (gndatom, value)
        # plain literal (hard evidence)
        else:
            if l[0] == "?":
                raise Exception("Unknown literals not supported (%s)" % l)  # this is an Alchemy feature
            try:
                true, predname, constants = mln.logic.parse_literal(l)
            except NoSuchPredicateError as e:
                if ignore_unknown_preds:
                    continue
                else:
                    raise e
            except Exception as e:
                traceback.print_exc()
                # fix: e.message does not exist in Python 3; str(e) is portable
                raise MLNParsingError('Error parsing line %d: %s (%s)' % (line + 1, l, str(e)))
    # NOTE(review): the visible excerpt of this function ends here; the tail
    # (appending the final db and returning dbs) is not part of this chunk.
def parse_mln(text, searchpaths=['.'], projectpath=None, logic='FirstOrderLogic', grammar='PRACGrammar', mln=None):
    '''
    Reads an MLN from a stream providing a 'read' method.

    Handles, per line: meta directives (#group, #fixweight, #fuzzy, #include,
    #unique, #AdaptiveMLNDependency), domain declarations, prior and posterior
    probability constraints, variable definitions, predicate declarations and
    weighted formula templates.

    NOTE(review): ``searchpaths=['.']`` is a mutable default argument.
    '''
    # normalize search paths to absolute, user/env-expanded form
    dirs = [os.path.abspath(os.path.expandvars(os.path.expanduser(p))) for p in searchpaths]
    formulatemplates = []
    text = str(text)
    if text == "":
        raise MLNParsingError("No MLN content to construct model from was given; must specify either file/list of files or content string!")
    # replace some meta-directives in comments
    text = re.compile(r'//\s*<group>\s*$', re.MULTILINE).sub("#group", text)
    text = re.compile(r'//\s*</group>\s*$', re.MULTILINE).sub("#group.", text)
    # remove comments
    text = stripComments(text)
    if mln is None:
        mln = MLN(logic, grammar)
    # read lines: parser state shared across iterations
    mln.hard_formulas = []
    templateIdx2GroupIdx = {}
    inGroup = False
    idxGroup = -1
    fixWeightOfNextFormula = False
    fuzzy = False
    pseudofuzzy = False
    uniquevars = None
    fixedWeightTemplateIndices = []
    lines = text.split("\n")
    iLine = 0
    while iLine < len(lines):
        line = lines[iLine]
        iLine += 1
        line = line.strip()
        try:
            if len(line) == 0:
                continue
            # meta directives
            if line == "#group":
                idxGroup += 1
                inGroup = True
                continue
            elif line == "#group.":
                inGroup = False
                continue
            elif line.startswith("#fixweight"):
                # fix the weight of the NEXT formula during learning
                fixWeightOfNextFormula = True
                continue
            elif line.startswith('#fuzzy'):
                if not isinstance(mln.logic, FuzzyLogic):
                    logger.warning('Fuzzy declarations are not supported in %s. Treated as a binary predicate.' % mln.logic.__class__.__name__)
                    pseudofuzzy = True
                else:
                    fuzzy = True
                continue
            elif line.startswith("#include"):
                filename = line[len("#include "):].strip()
                m = re.match(r'"(?P<filename>.+)"', filename)
                if m is not None:
                    filename = m.group('filename')
                    # if the path is relative, look for the respective file
                    # relatively to all paths specified. Take the first file matching.
                    if not mlnpath(filename).exists:
                        includefilename = None
                        for d in dirs:
                            mlnp = '/'.join([d, filename])
                            if mlnpath(mlnp).exists:
                                includefilename = mlnp
                                break
                        if includefilename is None:
                            raise Exception('File not found: %s' % filename)
                    else:
                        includefilename = filename
                else:
                    # angle-bracket form: include from the enclosing project
                    m = re.match(r'<(?P<filename>.+)>', filename)
                    if m is not None:
                        filename = m.group('filename')
                    else:
                        raise MLNParsingError('Malformed #include statement: %s' % line)
                    if projectpath is None:
                        raise MLNParsingError('No project specified: Cannot locate import from project: %s' % filename)
                    includefilename = ':'.join([projectpath, filename])
                logger.debug('Including file: "%s"' % includefilename)
                p = mlnpath(includefilename)
                # recurse, merging the included content into the same mln object
                parse_mln(text=mlnpath(includefilename).content, searchpaths=[p.resolve_path()]+dirs, projectpath=ifNone(p.project, projectpath, lambda x: '/'.join(p.path+[x])), logic=logic, grammar=grammar, mln=mln)
                continue
            elif line.startswith('#unique'):
                try:
                    uniVars = re.search('#unique{(.+)}', line)
                    uniVars = uniVars.groups()[0]
                    uniVars = map(str.strip, uniVars.split(','))
                    uniquevars = uniVars
                except:
                    raise MLNParsingError('Malformed #unique expression: "%s"' % line)
                continue
            elif line.startswith("#AdaptiveMLNDependency"): # declared as "#AdaptiveMLNDependency:pred:domain"; seems to be deprecated
                depPredicate, domain = line.split(":")[1:3]
                if hasattr(mln, 'AdaptiveDependencyMap'):
                    if depPredicate in mln.AdaptiveDependencyMap:
                        mln.AdaptiveDependencyMap[depPredicate].add(domain)
                    else:
                        mln.AdaptiveDependencyMap[depPredicate] = set([domain])
                else:
                    mln.AdaptiveDependencyMap = {depPredicate:set([domain])}
                continue
            # domain decl
            if '=' in line:
                # try normal domain definition
                parse = mln.logic.parse_domain(line)
                if parse is not None:
                    domName, constants = parse
                    domName = str(domName)
                    constants = map(str, constants)
                    if domName in mln.domains:
                        logger.debug("Domain redefinition: Domain '%s' is being updated with values %s." % (domName, str(constants)))
                    if domName not in mln.domains:
                        mln.domains[domName] = []
                    mln.constant(domName, *constants)
                    mln.domain_decls.append(line)
                    continue
            # prior probability requirement
            if line.startswith("P("):
                m = re.match(r"P\((.*?)\)\s*=\s*([\.\de]+)", line)
                if m is None:
                    raise MLNParsingError("Prior probability constraint formatted incorrectly: %s" % line)
                mln.prior(f=mln.logic.parse_formula(m.group(1)), p=float(m.group(2)))
                continue
            # posterior probability requirement/soft evidence
            if line.startswith("R(") or line.startswith("SE("):
                m = re.match(r"(?:R|SE)\((.*?)\)\s*=\s*([\.\de]+)", line)
                if m is None:
                    raise MLNParsingError("Posterior probability constraint formatted incorrectly: %s" % line)
                mln.posterior(f=mln.logic.parse_formula(m.group(1)), p=float(m.group(2)))
                continue
            # variable definition
            if re.match(r'(\$\w+)\s*=(.+)', line):
                m = re.match(r'(\$\w+)\s*=(.+)', line)
                if m is None:
                    raise MLNParsingError("Variable assigment malformed: %s" % line)
                mln.vars[m.group(1)] = "%s" % m.group(2).strip()
                continue
            # predicate decl or formula with weight
            else:
                isHard = False
                isPredDecl = False
                if line[-1] == '.': # hard (without explicit weight -> determine later)
                    isHard = True
                    formula = line[:-1]
                else: # with weight
                    # try predicate declaration
                    isPredDecl = True
                    try:
                        pred = mln.logic.parse_predicate(line)
                    except Exception, e:
                        isPredDecl = False
                if isPredDecl:
                    predname = str(pred[0])
                    argdoms = map(str, pred[1])
                    softmutex = False
                    mutex = None
                    # '!' marks a functional argument, '?' a soft-functional one
                    for i, dom in enumerate(argdoms):
                        if dom[-1] in ('!', '?'):
                            if mutex is not None:
                                raise Exception('More than one arguments are specified as (soft-)functional')
                            if fuzzy:
                                raise Exception('(Soft-)functional predicates must not be fuzzy.')
                            mutex = i
                        if dom[-1] == '?':
                            softmutex = True
                    argdoms = map(lambda x: x.strip('!?'), argdoms)
                    pred = None
                    if mutex is not None:
                        if softmutex:
                            pred = SoftFunctionalPredicate(predname, argdoms, mutex)
                        else:
                            pred = FunctionalPredicate(predname, argdoms, mutex)
                    elif fuzzy:
                        pred = FuzzyPredicate(predname, argdoms)
                        fuzzy = False
                    else:
                        pred = Predicate(predname, argdoms)
                        if pseudofuzzy:
                            mln.fuzzypreds.append(predname)
                            pseudofuzzy = False
                    mln.predicate(pred)
                    continue
                else:
                    # formula (template) with weight or terminated by '.'
                    if not isHard:
                        spacepos = line.find(' ')
                        weight = line[:spacepos]
                        formula = line[spacepos:].strip()
                    try:
                        formula = mln.logic.parse_formula(formula)
                        if isHard:
                            weight = HARD # not set until instantiation when other weights are known
                        idxTemplate = len(formulatemplates)
                        formulatemplates.append(formula)
                        fixweight = False
                        if inGroup:
                            templateIdx2GroupIdx[idxTemplate] = idxGroup
                        if fixWeightOfNextFormula == True:
                            fixWeightOfNextFormula = False
                            fixweight = True
                            fixedWeightTemplateIndices.append(idxTemplate)
                        # expand predicate groups
                        for variant in formula.expandgrouplits():
                            mln.formula(variant, weight=weight, fixweight=fixweight, unique_templvars=uniquevars)
                        if uniquevars:
                            uniquevars = None
                    except ParseException, e:
                        raise MLNParsingError("Error parsing formula '%s'\n" % formula)
            if fuzzy and not isPredDecl:
                raise Exception('"#fuzzy" decorator not allowed at this place: %s' % line)
    # NOTE(review): the visible excerpt ends here; the handler of the
    # enclosing try and the function's tail (returning mln) are not part
    # of this chunk.
def infer(self, savegeometry=True, options={}, *args):
    '''Run an MLN query from the current GUI state and persist the result (Python 2 variant).

    Builds the MLN and evidence database from command-line ``options``
    (``mlnarg``/``dbarg``/``emlnarg``/``queryarg``) or from the editor
    widgets, runs :class:`MLNQuery`, and writes the result either to
    ``options['outputfile']`` or into the project's results.

    NOTE(review): ``options={}`` is a mutable default argument -- harmless
    only as long as callers never mutate it; confirm before changing.
    '''
    # Tk editors return unicode; encode to utf8 byte strings for the parser
    mln_content = self.mln_container.editor.get("1.0", END).encode('utf8').strip()
    db_content = self.db_container.editor.get("1.0", END).encode('utf8').strip()
    # create conf from current gui settings
    self.update_config()
    # write gui settings
    self.write_gconfig(savegeometry=savegeometry)
    # hide gui while the (potentially long) inference runs
    self.master.withdraw()
    try:
        print headline('PRACMLN QUERY TOOL')
        print
        # MLN: explicit file from the command line wins over editor content
        if options.get('mlnarg') is not None:
            mlnobj = MLN(mlnfile=os.path.abspath(options.get('mlnarg')),
                         logic=self.config.get('logic', 'FirstOrderLogic'),
                         grammar=self.config.get('grammar', 'PRACGrammar'))
        else:
            mlnobj = parse_mln(mln_content, searchpaths=[self.dir],
                               projectpath=os.path.join(self.dir, self.project.name),
                               logic=self.config.get('logic', 'FirstOrderLogic'),
                               grammar=self.config.get('grammar', 'PRACGrammar'))
        # EMLN content: file argument or the EMLN editor widget
        if options.get('emlnarg') is not None:
            emln_content = mlnpath(options.get('emlnarg')).content
        else:
            emln_content = self.emln_container.editor.get("1.0", END).encode('utf8').strip()
        # evidence database: file argument or the DB editor widget
        if options.get('dbarg') is not None:
            dbobj = Database.load(mlnobj, dbfiles=[options.get('dbarg')], ignore_unknown_preds=self.config.get('ignore_unknown_preds', True))
        else:
            out(self.config.get('ignore_unknown_preds', True))
            dbobj = parse_db(mlnobj, db_content, ignore_unknown_preds=self.config.get('ignore_unknown_preds', True))
        if options.get('queryarg') is not None:
            self.config["queries"] = options.get('queryarg')
        infer = MLNQuery(config=self.config, mln=mlnobj, db=dbobj, emln=emln_content)
        result = infer.run()
        # write to file if run from commandline, otherwise save result to project results
        if options.get('outputfile') is not None:
            output = StringIO.StringIO()
            result.write(output)
            with open(os.path.abspath(options.get('outputfile')), 'w') as f:
                f.write(output.getvalue())
            logger.info('saved result to {}'.format(os.path.abspath(options.get('outputfile'))))
        elif self.save.get():
            output = StringIO.StringIO()
            result.write(output)
            fname = self.output_filename.get()
            self.project.add_result(fname, output.getvalue())
            self.project.save(dirpath=self.dir)
            logger.info('saved result to file results/{} in project {}'.format(fname, self.project.name))
        else:
            logger.debug('No output file given - results have not been saved.')
    except:
        # NOTE(review): bare except deliberately keeps the GUI alive on any
        # inference failure; the traceback is the only diagnostic emitted.
        traceback.print_exc()
    # restore main window
    sys.stdout.flush()
    self.master.deiconify()