Example #1
    def __call__(self, pracinference, **params):
        logger.info('Running {}'.format(self.name))

        print(prac_heading('Recognizing Objects'))

        # load default project
        projectpath = os.path.join(pracloc.pracmodules, self.name, self.defproject)
        project = MLNProject.open(projectpath)

        inf_step = PRACInferenceStep(pracinference, self)
        dbs = pracinference.inference_steps[-1].output_dbs

        mlntext = project.mlns.get(project.queryconf['mln'], None)
        mln = parse_mln(mlntext,
                        searchpaths=[self.module_path],
                        projectpath=projectpath,
                        logic=project.queryconf.get('logic', 'FuzzyLogic'),
                        grammar=project.queryconf.get('grammar',
                                                      'PRACGrammar'))

        wordnet_module = self.prac.module('wn_senses')

        # adding evidence properties to new query db
        for db in dbs:
            # find properties and add word similarities
            logger.debug(db.domains)
            logger.debug(mln.domains)
            output_db = wordnet_module.add_similarities(db, mln)
            output_db.write()

            # infer and update output dbs
            infer = self.mlnquery(config=project.queryconf,
                                  db=output_db, mln=mln)
            result_db = infer.resultdb

            inf_step.outdbs.append(result_db)

        return inf_step
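The snippets in this listing repeat the same loading pattern: open a .pracmln project, look up the MLN text named in the query configuration, and hand it to parse_mln. Below is a stripped-down sketch of that pattern, using the parse_mln import shown in Example #11; the inline MLN text (predicate and formula) is an illustrative placeholder and not part of any PRAC project.

from pracmln.mln.base import parse_mln   # same import as in Example #11

# Illustrative MLN text: one predicate declaration and one weighted formula.
# PRACGrammar uses ?-prefixed variables, as in the formula below.
mlntext = '''
has_sense(word, sense!)
0.5 has_sense(?w, ?s)
'''
mln = parse_mln(mlntext, logic='FirstOrderLogic', grammar='PRACGrammar')
print(mln.domains)   # the declared domains, e.g. 'word' and 'sense'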
Example #2
    def __call__(self, node, **params):
        # ======================================================================
        # Initialization
        # ======================================================================
        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print(prac_heading('Recognizing Action Cores'))

        if params.get('project', None) is None:
            # load default project
            projectpath = os.path.join(pracloc.pracmodules, self.name,
                                       self.defproject)
            ac_project = MLNProject.open(projectpath)
        else:
            logger.info(
                colorize('Loading Project from params', (None, 'cyan', True),
                         True))
            projectpath = os.path.join(
                params.get('projectpath', None)
                or os.path.join(pracloc.pracmodules, self.name),
                params.get('project').name)
            ac_project = params.get('project')

        dbs = node.outdbs

        mlntext = ac_project.mlns.get(ac_project.queryconf['mln'], None)
        mln = parse_mln(mlntext,
                        searchpaths=[self.module_path],
                        projectpath=projectpath,
                        logic=ac_project.queryconf.get('logic',
                                                       'FirstOrderLogic'),
                        grammar=ac_project.queryconf.get(
                            'grammar', 'PRACGrammar'))
        known_concepts = mln.domains.get('concept', [])
        infstep = PRACInferenceStep(node, self)
        wnmod = self.prac.module('wn_senses')

        pngs = {}
        nlinstr = node.nlinstr()
        sidx = nlinstr.idx()
        sentence = nlinstr.instr

        for db_ in dbs:
            # ==================================================================
            # Preprocessing
            # ==================================================================
            db = wnmod.get_senses_and_similarities(db_, known_concepts)
            tmp_union_db = db.union(db_, mln=self.prac.mln)
            infstep.indbs.append(tmp_union_db)

            # ==================================================================
            # Inference
            # ==================================================================
            infer = self.mlnquery(config=ac_project.queryconf,
                                  verbose=self.prac.verbose > 2,
                                  db=tmp_union_db,
                                  mln=mln)
            resultdb = infer.resultdb
            if self.prac.verbose == 2:
                print()
                print(prac_heading('INFERENCE RESULTS'))
                infer.write()
            # ==================================================================
            # Postprocessing
            # ==================================================================
            unified_db = resultdb.union(tmp_union_db, mln=self.prac.mln)

            #             infstep.outdbs
            infstep.outdbs.extend(
                self.extract_multiple_action_cores(self.prac, unified_db,
                                                   wnmod, known_concepts))

            pngs[unified_db.domains.get('actioncore',
                                        [None])[0]] = get_cond_prob_png(
                                            ac_project.queryconf.get(
                                                'queries', ''),
                                            dbs,
                                            filename=self.name)
        infstep.png = pngs
        infstep.applied_settings = ac_project.queryconf.config
        pred = None
        for outdb in infstep.outdbs:
            #             out('in ac rec:')
            #             for w, ac in outdb.actioncores():
            #                 out(w, ac)
            for frame in node.pracinfer.buildframes(outdb, sidx, sentence):
                node_ = FrameNode(node.pracinfer,
                                  frame,
                                  node,
                                  pred,
                                  indbs=[outdb],
                                  prevmod=self.name)
                pred = node_
                yield node_
                break
            else:
                logger.error('no actioncore recognized in %s' % node)
                raise Exception('no actioncore recognized in %s' % node)
Example #3
    def __call__(self, node, **params):

        # ======================================================================
        # Initialization
        # ======================================================================

        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print(prac_heading('Refining Actioncores'))
        dbs = node.outdbs
        infstep = PRACInferenceStep(node, self)
        #         if node.previous_module == 'achieved_by':
        #             raise ActionKnowledgeError('I don\'t know how to %s' % node.frame.sentence)
        # ======================================================================
        # Preprocessing
        # ======================================================================
        for olddb in dbs:
            infstep.indbs.append(olddb.copy())
            # To handle multiple action cores in one task, we have to check
            # whether the individual dbs contain achieved_by atoms that
            # already represent plans.
            pngs = {}
            actioncore = node.frame.actioncore
            mod = self.prac.module('complex_achieved_by')
            newnodes = list(mod(node))
            n = None
            parentframes = [
                p.frame for p in node.parentspath()
                if isinstance(p, FrameNode)
            ]
            if any(n.frame in parentframes for n in newnodes):
                logger.error(
                    'aborting reasoning because of infinite loop. (%s)' %
                    node.frame)
                node.children = []
            else:
                for n in newnodes:
                    yield n
            if n is not None: return
            if n is None:
                # This list is used to avoid an infinite loop during the
                # achieved_by inference: it records the pracmln projects that
                # were already queried during the process. Every project
                # should be used only once, because the evidence for the
                # inference always stays the same; if a project did not
                # infer a plan the first time, it never will.

                # Need to remove possible achieved_by predicates from
                # previous achieved_by inferences
                db_ = PRACDatabase(self.prac)
                for atom, truth in sorted(olddb.evidence.items()):
                    if 'achieved_by' in atom: continue
                    db_ << (atom, truth)
                if params.get('project', None) is None:
                    logger.debug('Loading Project: {}.pracmln'.format(
                        colorize(actioncore, (None, 'cyan', True), True)))
                    projectpath = os.path.join(pracloc.pracmodules, self.name,
                                               '{}.pracmln'.format(actioncore))
                    if os.path.exists(projectpath):
                        project = MLNProject.open(projectpath)
                    else:
                        infstep.outdbs.append(olddb)
                        logger.error(actioncore + ".pracmln does not exist.")
                        return
                else:
                    logger.debug(
                        colorize('Loading Project from params',
                                 (None, 'cyan', True), True))
                    projectpath = os.path.join(
                        params.get('projectpath', None)
                        or os.path.join(pracloc.pracmodules, self.name),
                        params.get('project').name)
                    project = params.get('project')

                mlntext = project.mlns.get(project.queryconf['mln'], None)
                mln = parse_mln(
                    mlntext,
                    searchpaths=[self.module_path],
                    projectpath=projectpath,
                    logic=project.queryconf.get('logic', 'FirstOrderLogic'),
                    grammar=project.queryconf.get('grammar', 'PRACGrammar'))
                known_concepts = mln.domains.get('concept', [])
                wnmod = self.prac.module('wn_senses')

                # Merge the domains of the db and the given MLN to avoid errors
                # caused by role inference and the resulting missing fuzzy predicates.
                known_concepts = list(
                    set(known_concepts).union(
                        set(db_.domains.get('concept', []))))
                db = wnmod.get_senses_and_similarities(db_, known_concepts)

                unified_db = db_.union(db)
                dbnew = wnmod.add_sims(unified_db, unified_db)

                # Inference achieved_by predicate
                db_ = self.extendDBWithAchievedByEvidence(
                    dbnew, mln, actioncore)
                # ==============================================================
                # Inference
                # ==============================================================
                #                 db_.write()
                try:
                    infer = self.mlnquery(config=project.queryconf,
                                          verbose=self.prac.verbose > 2,
                                          db=db_,
                                          mln=mln)
                except NoConstraintsError:
                    logger.error(
                        'achieved_by inference failed due to NoConstraintsError: %s'
                        % node.frame)
                    return
                result_db = infer.resultdb

                if self.prac.verbose == 2:
                    print()
                    print(prac_heading('INFERENCE RESULTS'))
                    infer.write()
                # ==============================================================
                # Postprocessing
                # ==============================================================
                # unified_db = result_db.union(kb.query_mln, db_)
                # only add inferred achieved_by atoms, leave out
                # 0-evidence atoms
                for qa in result_db.query('achieved_by(?ac1,?ac2)'):
                    if qa['?ac2'] == 'Complex': continue
                    unified_db << 'achieved_by({},{})'.format(
                        qa['?ac1'], qa['?ac2'])
                    pngs[qa['?ac2']] = get_cond_prob_png(project.queryconf.get(
                        'queries', ''),
                                                         dbs,
                                                         filename=self.name)
                    newframe = Frame(self.prac,
                                     node.frame.sidx,
                                     '',
                                     words=[],
                                     syntax=[],
                                     actioncore=qa['?ac2'],
                                     actionroles={})
                    #                     out('->', newframe)
                    infstep.outdbs.append(unified_db)
                    yield FrameNode(node.pracinfer,
                                    newframe,
                                    node,
                                    pred=None,
                                    indbs=[unified_db],
                                    prevmod=self.name)
                    return
                infstep.outdbs.append(unified_db)


#             raise ActionKnowledgeError('I don\'t know how to %s' % node.frame.sentence)
Example #4
    def infer(self, savegeometry=True, options={}, *args):
        mln_content = self.mln_container.editor.get("1.0", END).strip()
        db_content = self.db_container.editor.get("1.0", END).strip()

        # create conf from current gui settings
        self.update_config()

        # write gui settings
        self.write_gconfig(savegeometry=savegeometry)

        # hide gui
        self.master.withdraw()

        try:
            print((headline('PRACMLN QUERY TOOL')))
            print()

            if options.get('mlnarg') is not None:
                mlnobj = MLN(mlnfile=os.path.abspath(options.get('mlnarg')),
                             logic=self.config.get('logic', 'FirstOrderLogic'),
                             grammar=self.config.get('grammar', 'PRACGrammar'))
            else:
                mlnobj = parse_mln(
                    mln_content,
                    searchpaths=[self.dir],
                    projectpath=os.path.join(self.dir, self.project.name),
                    logic=self.config.get('logic', 'FirstOrderLogic'),
                    grammar=self.config.get('grammar', 'PRACGrammar'))

            if options.get('emlnarg') is not None:
                emln_content = mlnpath(options.get('emlnarg')).content
            else:
                emln_content = self.emln_container.editor.get("1.0",
                                                              END).strip()

            if options.get('dbarg') is not None:
                dbobj = Database.load(mlnobj,
                                      dbfiles=[options.get('dbarg')],
                                      ignore_unknown_preds=self.config.get(
                                          'ignore_unknown_preds', True))
            else:
                out(self.config.get('ignore_unknown_preds', True))
                dbobj = parse_db(mlnobj,
                                 db_content,
                                 ignore_unknown_preds=self.config.get(
                                     'ignore_unknown_preds', True))

            if options.get('queryarg') is not None:
                self.config["queries"] = options.get('queryarg')

            infer = MLNQuery(config=self.config,
                             mln=mlnobj,
                             db=dbobj,
                             emln=emln_content)
            result = infer.run()

            # write to file if run from commandline, otherwise save result to project results
            if options.get('outputfile') is not None:
                output = io.StringIO()
                result.write(output)
                with open(os.path.abspath(options.get('outputfile')),
                          'w') as f:
                    f.write(output.getvalue())
                logger.info('saved result to {}'.format(
                    os.path.abspath(options.get('outputfile'))))
            elif self.save.get():
                output = io.StringIO()
                result.write(output)
                fname = self.output_filename.get()
                self.project.add_result(fname, output.getvalue())
                self.project.save(dirpath=self.dir)
                logger.info(
                    'saved result to file results/{} in project {}'.format(
                        fname, self.project.name))
            else:
                logger.debug(
                    'No output file given - results have not been saved.')
        except:
            traceback.print_exc()

        # restore main window
        sys.stdout.flush()
        self.master.deiconify()
Example #5
    def run(self):
        watch = StopWatch()
        watch.tag('inference', self.verbose)
        # load the MLN
        if isinstance(self.mln, MLN):
            mln = self.mln
        else:
            raise Exception('No MLN specified')

        if self.use_emln and self.emln is not None:
            mlnstrio = io.StringIO()
            mln.write(mlnstrio)
            mlnstr = mlnstrio.getvalue()
            mlnstrio.close()
            emln = self.emln
            mln = parse_mln(mlnstr + emln,
                            grammar=self.grammar,
                            logic=self.logic)

        # load the database
        if isinstance(self.db, Database):
            db = self.db
        elif isinstance(self.db, list) and len(self.db) == 1:
            db = self.db[0]
        elif isinstance(self.db, list) and len(self.db) == 0:
            db = Database(mln)
        elif isinstance(self.db, list):
            raise Exception(
                'Got {} dbs. Can only handle one for inference.'.format(
                    len(self.db)))
        else:
            raise Exception('DB of invalid format {}'.format(type(self.db)))

        # expand the parameters
        params = dict(self._config)
        if 'params' in params:
            params.update(eval("dict(%s)" % params['params']))
            del params['params']
        params['verbose'] = self.verbose
        if self.verbose:
            print((tabulate(sorted(list(params.items()),
                                   key=lambda k_v: str(k_v[0])),
                            headers=('Parameter:', 'Value:'))))
        if type(db) is list and len(db) > 1:
            raise Exception('Inference can only handle one database at a time')
        elif type(db) is list:
            db = db[0]
        params['cw_preds'] = [x for x in self.cw_preds if bool(x)]
        # extract and remove all non-algorithm parameters
        for s in GUI_SETTINGS:
            if s in params: del params[s]

        if self.profile:
            prof = Profile()
            print('starting profiler...')
            prof.enable()
        # set the debug level
        olddebug = logger.level
        logger.level = (eval('logs.%s' %
                             params.get('debug', 'WARNING').upper()))
        result = None
        try:
            mln_ = mln.materialize(db)
            mrf = mln_.ground(db)
            inference = self.method(mrf, self.queries, **params)
            if self.verbose:
                print()
                print((headline('EVIDENCE VARIABLES')))
                print()
                mrf.print_evidence_vars()

            result = inference.run()
            if self.verbose:
                print()
                print((headline('INFERENCE RESULTS')))
                print()
                inference.write()
            if self.verbose:
                print()
                inference.write_elapsed_time()
        except SystemExit:
            traceback.print_exc()
            print('Cancelled...')
        finally:
            if self.profile:
                prof.disable()
                print((headline('PROFILER STATISTICS')))
                ps = pstats.Stats(prof,
                                  stream=sys.stdout).sort_stats('cumulative')
                ps.print_stats()
            # reset the debug level
            logger.level = olddebug
        if self.verbose:
            print()
            watch.finish()
            watch.printSteps()
        return result
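For orientation, here is a minimal sketch of how this run() method is typically reached from user code. It only uses calls that appear in the snippets above (MLN, Database.load, MLNQuery); the file names and the query predicate are illustrative assumptions.

import io

from pracmln import MLN, Database, MLNQuery   # assumed top-level imports

# Hypothetical model and evidence files.
mln = MLN(mlnfile='smokers.mln', logic='FirstOrderLogic', grammar='StandardGrammar')
dbs = Database.load(mln, dbfiles=['smokers.db'])   # returns a list of databases

# MLNQuery.run() (shown above) materializes the MLN, grounds it against the
# database and runs the chosen inference method over the query predicates.
result = MLNQuery(mln=mln, db=dbs[0], method='EnumerationAsk',
                  queries='Smokes', verbose=True).run()

# write the results to a buffer, as the GUI examples in this listing do
buf = io.StringIO()
result.write(buf)
print(buf.getvalue())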
Example #6
    def learn(self, savegeometry=True, options={}, *args):
        mln_content = self.mln_container.editor.get("1.0", END).encode('utf8').strip()
        db_content = self.db_container.editor.get("1.0", END).encode('utf8').strip()

        # create conf from current gui settings
        self.update_config()

        # write gui settings
        self.write_gconfig(savegeometry=savegeometry)

        # hide gui
        self.master.withdraw()

        try:
            print headline('PRAC LEARNING TOOL')
            print

            if options.get('mlnarg') is not None:
                mlnobj = MLN(mlnfile=os.path.abspath(options.get('mlnarg')),
                             logic=self.config.get('logic', 'FirstOrderLogic'),
                             grammar=self.config.get('grammar', 'PRACGrammar'))
            else:
                mlnobj = parse_mln(mln_content, searchpaths=[self.project_dir],
                                   projectpath=os.path.join(self.project_dir, self.project.name),
                                   logic=self.config.get('logic', 'FirstOrderLogic'),
                                   grammar=self.config.get('grammar', 'PRACGrammar'))

            if options.get('dbarg') is not None:
                dbobj = Database.load(mlnobj, dbfiles=[options.get('dbarg')], ignore_unknown_preds=self.config.get('ignore_unknown_preds', True))
            else:
                if self.config.get('pattern'):
                    local, dblist = self.get_training_db_paths(self.config.get('pattern').strip())
                    dbobj = []
                    # build database list from project dbs
                    if local:
                        for dbname in dblist:
                            dbobj.extend(parse_db(mlnobj, self.project.dbs[dbname].strip(),
                                         ignore_unknown_preds=self.config.get('ignore_unknown_preds', True),
                                         projectpath=os.path.join(self.dir, self.project.name)))
                        out(dbobj)
                    # build database list from filesystem dbs
                    else:
                        for dbpath in dblist:
                            dbobj.extend(Database.load(mlnobj, dbpath, ignore_unknown_preds= self.config.get('ignore_unknown_preds', True)))
                # build single db from currently selected db
                else:
                    dbobj = parse_db(mlnobj, db_content, projectpath=os.path.join(self.dir, self.project.name), dirs=[self.dir])

            learning = MLNLearn(config=self.config, mln=mlnobj, db=dbobj)
            result = learning.run()

            # write to file if run from commandline, otherwise save result
            # to project results
            if options.get('outputfile') is not None:
                output = StringIO.StringIO()
                result.write(output)
                with open(os.path.abspath(options.get('outputfile')), 'w') as f:
                    f.write(output.getvalue())
                logger.info('saved result to {}'.format(os.path.abspath(options.get('outputfile'))))
            elif self.save.get():
                output = StringIO.StringIO()
                result.write(output)
                self.project.add_mln(self.output_filename.get(), output.getvalue())
                self.mln_container.update_file_choices()
                self.project.save(dirpath=self.project_dir)
                logger.info('saved result to file mln/{} in project {}'.format(self.output_filename.get(), self.project.name))
            else:
                logger.debug("No output file given - results have not been saved.")
        except:
            traceback.print_exc()

        # restore gui
        sys.stdout.flush()
        self.master.deiconify()
Example #7
    def run(self):
        watch = StopWatch()
        watch.tag('inference', self.verbose)
        # load the MLN
        if isinstance(self.mln, MLN):
            mln = self.mln
        else:
            raise Exception('No MLN specified')

        if self.use_emln and self.emln is not None:
            mlnstrio = StringIO.StringIO()
            mln.write(mlnstrio)
            mlnstr = mlnstrio.getvalue()
            mlnstrio.close()
            emln = self.emln
            mln = parse_mln(mlnstr + emln,
                            grammar=self.grammar,
                            logic=self.logic)

        # load the database
        if isinstance(self.db, Database):
            db = self.db
        elif isinstance(self.db, list) and len(self.db) == 1:
            db = self.db[0]
        elif isinstance(self.db, list):
            raise Exception(
                'Got {} dbs. Can only handle one for inference.'.format(
                    len(self.db)))
        else:
            raise Exception('DB of invalid format {}'.format(type(self.db)))

        # expand the parameters
        params = dict(self._config)
        if 'params' in params:
            params.update(eval("dict(%s)" % params['params']))
            del params['params']
        if self.verbose:
            print tabulate(sorted(list(params.viewitems()),
                                  key=lambda (k, v): str(k)),
                           headers=('Parameter:', 'Value:'))
        # create the MLN and evidence database, then parse the queries
#         mln = parse_mln(modelstr, searchPath=self.dir.get(), logic=self.config['logic'], grammar=self.config['grammar'])
#         db = parse_db(mln, db_content, ignore_unknown_preds=params.get('ignore_unknown_preds', False))
        if type(db) is list and len(db) > 1:
            raise Exception('Inference can only handle one database at a time')
        elif type(db) is list:
            db = db[0]
        # parse non-atomic params


#         if type(self.queries) is not list:
#             queries = parse_queries(mln, str(self.queries))
        params['cw_preds'] = filter(lambda x: bool(x), self.cw_preds)
        # extract and remove all non-algorithm parameters
        for s in GUI_SETTINGS:
            if s in params: del params[s]

        if self.profile:
            prof = Profile()
            print 'starting profiler...'
            prof.enable()
        # set the debug level
        olddebug = praclog.level()
        praclog.level(
            eval('logging.%s' % params.get('debug', 'WARNING').upper()))
        result = None
        try:
            mln_ = mln.materialize(db)
            mrf = mln_.ground(db)
            inference = self.method(mrf, self.queries, **params)
            if self.verbose:
                print
                print headline('EVIDENCE VARIABLES')
                print
                mrf.print_evidence_vars()

            result = inference.run()
            if self.verbose:
                print
                print headline('INFERENCE RESULTS')
                print
                inference.write()
            if self.verbose:
                print
                inference.write_elapsed_time()
        except SystemExit:
            print 'Cancelled...'
        finally:
            if self.profile:
                prof.disable()
                print headline('PROFILER STATISTICS')
                ps = pstats.Stats(prof,
                                  stream=sys.stdout).sort_stats('cumulative')
                ps.print_stats()
            # reset the debug level
            praclog.level(olddebug)
        if self.verbose:
            print
            watch.finish()
            watch.printSteps()
        return result
Example #8
    def role_distributions(self, step):
        '''
        Compute word sense distributions for the action roles found in the
        given inference step and render them as colored WordNet graphs.

        :param step:    the inference step whose output databases are examined
        :return:        a dictionary mapping role names to rendered graphviz graphs
        '''
        distrs = {}
        for db_ in step.output_dbs:
            for word in db_.domains['word']:
                for q in db_.query('action_core(?w,?ac)'):

                    # ==========================================================
                    # Initialization
                    # ==========================================================

                    actioncore = q['?ac']
                    projectpath = os.path.join(self.module_path,
                                               '{}.pracmln'.format(actioncore))
                    project = MLNProject.open(projectpath)
                    mlntext = project.mlns.get(project.queryconf['mln'], None)
                    mln = parse_mln(
                        mlntext,
                        searchpaths=[self.module_path],
                        projectpath=projectpath,
                        logic=project.queryconf.get('logic',
                                                    'FirstOrderLogic'),
                        grammar=project.queryconf.get('grammar',
                                                      'PRACGrammar'))

                    # ==========================================================
                    # Preprocessing
                    # ==========================================================

                    # add inferred concepts to known_concepts to display
                    # them in the graph. Ignore verbs and adjectives,
                    # as they do not have hypernym relations to nouns
                    concepts = self.prac.config.getlist('wordnet', 'concepts')
                    for con in db_.query('has_sense(?w,?s)'):
                        if con['?s'].split('.')[1] in ['a', 's', 'v']:
                            continue
                        concepts.append(con['?s'])
                    wn = WordNet(concepts=concepts)

                    db = db_.copy(mln=mln)
                    for qs in db_.query('!(EXIST ?w (has_sense(?w,?s)))'):
                        db.rmval('sense', qs['?s'])
                    for concept in db_.domains['concept']:
                        if concept not in mln.domains['concept']:
                            db.rmval('concept', concept)
                    for res in db_.query('has_sense({}, ?s)'.format(word)):
                        sense = res['?s']
                        if sense == 'null': continue
                        roles = self.prac.actioncores[actioncore].roles
                        role = None
                        for r in roles:
                            vars = [
                                '?v{}'.format(i) for i in range(
                                    len(db_.mln.predicate(r).argdoms) - 1)
                            ]
                            br = False
                            for qr in db_.query('{}({},{})'.format(
                                    r, ','.join(vars), actioncore)):
                                for v in vars:
                                    if qr[v] == word:
                                        role = r
                                        br = True
                                        break
                                if br: break
                            if br: break
                        if role is None: continue
                        db.retract('has_sense({},{})'.format(word, sense))
                        add_all_wordnet_similarities(db, wn)

                        # ======================================================
                        # Inference
                        # ======================================================

                        infer = self.mlnquery(method='EnumerationAsk',
                                              mln=mln,
                                              db=db,
                                              queries='has_sense',
                                              cw=True,
                                              multicore=True,
                                              verbose=self.prac.verbose > 2)

                        result = infer.resultdb

                        if self.prac.verbose == 2:
                            print()
                            print(prac_heading('INFERENCE RESULTS'))
                            print()
                            infer.write()

                        # ======================================================
                        # Graph generation
                        # ======================================================

                        g = wn.to_dot()
                        maxprob = 0.
                        for atom, truth in result.gndatoms():
                            _, predname, args = db.mln.logic.parse_literal(
                                atom)
                            concept = args[1]
                            if predname == 'has_sense' and args[
                                    0] == word and concept != 'null':
                                maxprob = max(maxprob, truth)

                        for atom, truth in result.gndatoms():
                            _, predname, args = db.mln.logic.parse_literal(
                                atom)
                            concept = args[1]
                            if predname == 'has_sense' and args[
                                    0] == word and concept != 'null':
                                if concept in concepts:
                                    g.node(concept,
                                           fillcolor=get_prob_color(truth /
                                                                    maxprob))
                        distrs[role] = render_gv(g)
        return distrs
Example #9
    def __call__(self, pracinference, **params):

        # ======================================================================
        # Initialization
        # ======================================================================

        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print(prac_heading('Recognizing Control Structures'))

        if params.get('project', None) is None:
            # load default project
            projectpath = os.path.join(pracloc.pracmodules, self.name,
                                       self.defproject)
            ac_project = MLNProject.open(projectpath)
        else:
            logger.info(
                colorize('Loading Project from params', (None, 'cyan', True),
                         True))
            projectpath = os.path.join(
                params.get('projectpath', None)
                or os.path.join(pracloc.pracmodules, self.name),
                params.get('project').name)
            ac_project = params.get('project')

        dbs = pracinference.inference_steps[-1].output_dbs

        mlntext = ac_project.mlns.get(ac_project.queryconf['mln'], None)
        mln = parse_mln(mlntext,
                        searchpaths=[self.module_path],
                        projectpath=projectpath,
                        logic=ac_project.queryconf.get('logic',
                                                       'FirstOrderLogic'),
                        grammar=ac_project.queryconf.get(
                            'grammar', 'PRACGrammar'))
        inf_step = PRACInferenceStep(pracinference, self)

        pngs = {}
        for i, db in enumerate(dbs):
            db_ = db.copy()

            # ======================================================================
            # Inference
            # ======================================================================

            infer = self.mlnquery(config=ac_project.queryconf, db=db, mln=mln)
            result_db = infer.resultdb

            if self.prac.verbose == 2:
                print()
                print(prac_heading('INFERENCE RESULTS'))
                infer.write()

            # ==========================================================
            # Postprocessing
            # ==========================================================

            for q in result_db.query('event(?w,?ac)'):
                db_ << 'event({},{})'.format(q['?w'], q['?ac'])
            for q in result_db.query('condition(?w)'):
                db_ << 'condition({})'.format(q['?w'])

            inf_step.output_dbs.append(db_)
            pngs['CS' + str(i)] = get_cond_prob_png(ac_project.queryconf.get(
                'queries', ''),
                                                    dbs,
                                                    filename=self.name)
            inf_step.png = pngs

        inf_step.applied_settings = ac_project.queryconf.config

        return inf_step
Example #10
    def __call__(self, node, **params):

        # ======================================================================
        # Initialization
        # ======================================================================

        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print(prac_heading('Property Extraction'))

        if params.get('project', None) is None:
            # load default project
            projectpath = os.path.join(pracloc.pracmodules, self.name,
                                       self.defproject)
            project = MLNProject.open(projectpath)
        else:
            # load project from params
            projectpath = os.path.join(
                params.get('projectpath', None)
                or os.path.join(pracloc.pracmodules, self.name),
                params.get('project').name)
            project = params.get('project')

        dbs = node.outdbs
        infstep = PRACInferenceStep(node, self)

        mlntext = project.mlns.get(project.queryconf['mln'], None)
        mln = parse_mln(mlntext,
                        searchpaths=[self.module_path],
                        projectpath=projectpath,
                        logic=project.queryconf.get('logic', 'FuzzyLogic'),
                        grammar=project.queryconf.get('grammar',
                                                      'PRACGrammar'))
        wnmod = self.prac.module('wn_senses')

        pngs = {}
        for i, db in enumerate(dbs):
            # ==================================================================
            # Preprocessing
            # ==================================================================
            db_ = wnmod.add_sims(db, mln)
            infstep.indbs.append(db_)
            try:
                # ==============================================================
                # Inference
                # ==============================================================

                infer = self.mlnquery(config=project.queryconf,
                                      verbose=self.prac.verbose > 2,
                                      db=db_,
                                      mln=mln)
                result_db = infer.resultdb

                if self.prac.verbose == 2:
                    print()
                    print(prac_heading('INFERENCE RESULTS'))
                    print()
                    infer.write()

                # ==============================================================
                # Postprocessing
                # ==============================================================
                unified_db = db.copy(self.prac.mln)
                props = [
                    p for p in project.queryconf.get('queries', '').split(',')
                    if p != 'has_sense'
                ]
                for p in props:
                    for q in result_db.query(
                            '{}(?w1,?w2) ^ has_sense(?w2,?s2)'.format(p)):
                        unified_db << '{}({},{})'.format(p, q['?w1'], q['?w2'])
                        unified_db << 'has_sense({},{})'.format(
                            q['?w2'], q['?s2'])

                infstep.outdbs.append(unified_db)
            except NoConstraintsError:
                logger.debug('No properties found. Passing db...')
                infstep.outdbs.append(db.copy())
            except Exception:
                logger.error('Something went wrong')
                traceback.print_exc()

            pngs['PropExtraction - ' + str(i)] = get_cond_prob_png(
                project.queryconf.get('queries', ''),
                infstep.indbs,
                filename=self.name)
            infstep.png = pngs

        infstep.applied_settings = project.queryconf.config
        return [node]
Example #11
    def loadmln(self, config, mln=None):
        if config == 'query': config = self.queryconf
        elif config == 'learn': config = self.learnconf
        from pracmln.mln.base import parse_mln
        path = self.path if hasattr(self, 'path') else None
        return parse_mln(self.mlns[ifNone(mln, config['mln'])], projectpath=path,
                         logic=config['logic'], grammar=config['grammar'])
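A short usage sketch for this helper, assuming that project is an MLNProject opened as in the other examples (MLNProject.open on a .pracmln file); the domain name queried at the end is just an example.

# `project` is assumed to be an MLNProject opened as in the examples above.
query_mln = project.loadmln('query')     # parse the MLN named in the query configuration
learn_mln = project.loadmln('learn')     # parse the MLN named in the learning configuration
print(query_mln.domains.get('concept', []))   # e.g. inspect the known concepts (cf. Example #2)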
Example #12
    def infer(self, savegeometry=True, options={}, *args):
        mln_content = self.mln_container.editor.get("1.0", END).encode('utf8').strip()
        db_content = self.db_container.editor.get("1.0", END).encode('utf8').strip()

        # create conf from current gui settings
        self.update_config()

        # write gui settings
        self.write_gconfig(savegeometry=savegeometry)

        # hide gui
        self.master.withdraw()

        try:
            print headline('PRACMLN QUERY TOOL')
            print

            if options.get('mlnarg') is not None:
                mlnobj = MLN(mlnfile=os.path.abspath(options.get('mlnarg')),
                             logic=self.config.get('logic', 'FirstOrderLogic'),
                             grammar=self.config.get('grammar', 'PRACGrammar'))
            else:
                mlnobj = parse_mln(mln_content, searchpaths=[self.dir],
                                   projectpath=os.path.join(self.dir, self.project.name),
                                   logic=self.config.get('logic', 'FirstOrderLogic'),
                                   grammar=self.config.get('grammar', 'PRACGrammar'))

            if options.get('emlnarg') is not None:
                emln_content = mlnpath(options.get('emlnarg')).content
            else:
                emln_content = self.emln_container.editor.get("1.0", END).encode('utf8').strip()

            if options.get('dbarg') is not None:
                dbobj = Database.load(mlnobj, dbfiles=[options.get('dbarg')], ignore_unknown_preds=self.config.get('ignore_unknown_preds', True))
            else:
                out(self.config.get('ignore_unknown_preds', True))
                dbobj = parse_db(mlnobj, db_content, ignore_unknown_preds=self.config.get('ignore_unknown_preds', True))

            if options.get('queryarg') is not None:
                self.config["queries"] = options.get('queryarg')

            infer = MLNQuery(config=self.config, mln=mlnobj, db=dbobj, emln=emln_content)
            result = infer.run()


            # write to file if run from commandline, otherwise save result to project results
            if options.get('outputfile') is not None:
                output = StringIO.StringIO()
                result.write(output)
                with open(os.path.abspath(options.get('outputfile')), 'w') as f:
                    f.write(output.getvalue())
                logger.info('saved result to {}'.format(os.path.abspath(options.get('outputfile'))))
            elif self.save.get():
                output = StringIO.StringIO()
                result.write(output)
                fname = self.output_filename.get()
                self.project.add_result(fname, output.getvalue())
                self.project.save(dirpath=self.dir)
                logger.info('saved result to file results/{} in project {}'.format(fname, self.project.name))
            else:
                logger.debug('No output file given - results have not been saved.')
        except:
            traceback.print_exc()

        # restore main window
        sys.stdout.flush()
        self.master.deiconify()
Example #13
    def run(self):
        watch = StopWatch()
        watch.tag('inference', self.verbose)
        # load the MLN
        if isinstance(self.mln, MLN):
            mln = self.mln
        else:
            raise Exception('No MLN specified')

        if self.use_emln and self.emln is not None:
            mlnstrio = StringIO.StringIO()
            mln.write(mlnstrio)
            mlnstr = mlnstrio.getvalue()
            mlnstrio.close()
            emln = self.emln
            mln = parse_mln(mlnstr + emln, grammar=self.grammar,
                            logic=self.logic)

        # load the database
        if isinstance(self.db, Database):
            db = self.db
        elif isinstance(self.db, list) and len(self.db) == 1:
            db = self.db[0]
        elif isinstance(self.db, list):
            raise Exception(
                'Got {} dbs. Can only handle one for inference.'.format(
                    len(self.db)))
        else:
            raise Exception('DB of invalid format {}'.format(type(self.db)))

        # expand the parameters
        params = dict(self._config)
        if 'params' in params:
            params.update(eval("dict(%s)" % params['params']))
            del params['params']
        if self.verbose:
            print tabulate(sorted(list(params.viewitems()), key=lambda (k, v): str(k)), headers=('Parameter:', 'Value:'))
        if type(db) is list and len(db) > 1:
            raise Exception('Inference can only handle one database at a time')
        elif type(db) is list:
            db = db[0]
        params['cw_preds'] = filter(lambda x: bool(x), self.cw_preds)
        # extract and remove all non-algorithm parameters
        for s in GUI_SETTINGS:
            if s in params: del params[s]

        if self.profile:
            prof = Profile()
            print 'starting profiler...'
            prof.enable()
        # set the debug level
        olddebug = praclog.level()
        praclog.level(eval('logging.%s' % params.get('debug', 'WARNING').upper()))
        result = None
        try:
            mln_ = mln.materialize(db)
            mrf = mln_.ground(db)
            inference = self.method(mrf, self.queries, **params)
            if self.verbose:
                print
                print headline('EVIDENCE VARIABLES')
                print
                mrf.print_evidence_vars()

            result = inference.run()
            if self.verbose:
                print
                print headline('INFERENCE RESULTS')
                print
                inference.write()
            if self.verbose:
                print
                inference.write_elapsed_time()
        except SystemExit:
            print 'Cancelled...'
        finally:
            if self.profile:
                prof.disable()
                print headline('PROFILER STATISTICS')
                ps = pstats.Stats(prof, stream=sys.stdout).sort_stats('cumulative')
                ps.print_stats()
            # reset the debug level
            praclog.level(olddebug)
        if self.verbose:
            print
            watch.finish()
            watch.printSteps()
        return result
Example #14
    def __call__(self, node, **params):
        # ======================================================================
        # Initialization
        # ======================================================================
        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print prac_heading('Recognizing {} Roles'.format({
                True: 'MISSING',
                False: 'GIVEN'
            }[params.get('missing', False)]))

        dbs = node.outdbs
        infstep = PRACInferenceStep(node, self)
        queries = ''
        wnmod = self.prac.module('wn_senses')
        actionroles = defaultdict(list)
        pngs = {}
        for n, olddb in enumerate(dbs):
            db_copy = olddb.copy(mln=self.prac.mln)
            actioncore = node.frame.actioncore
            logger.debug(actioncore)
            if params.get('project', None) is None:
                logger.debug('Loading Project: {}.pracmln'.format(
                    colorize(actioncore, (None, 'cyan', True), True)))
                projectpath = os.path.join(pracloc.pracmodules, self.name,
                                           '{}.pracmln'.format(actioncore))
                project = MLNProject.open(projectpath)
            else:
                logger.debug(
                    colorize('Loading Project from params',
                             (None, 'cyan', True), True))
                projectpath = os.path.join(
                    params.get('projectpath', None)
                    or os.path.join(pracloc.pracmodules, self.name),
                    params.get('project').name)
                project = params.get('project')

            queries = project.queryconf.get('queries', '')
            mlntext = project.mlns.get(project.queryconf['mln'], None)
            mln = parse_mln(mlntext,
                            searchpaths=[self.module_path],
                            projectpath=projectpath,
                            logic=project.queryconf.get('logic', 'FuzzyLogic'),
                            grammar=project.queryconf.get(
                                'grammar', 'PRACGrammar'))
            known_concepts = mln.domains.get('concept', [])

            # ==============================================================
            # Preprocessing
            # ==============================================================
            # adding senses and similarities. might be obsolete as it has
            # already been performed in ac recognition
            logger.debug('adding senses. concepts={}'.format(known_concepts))
            db = wnmod.get_senses_and_similarities(db_copy, known_concepts)

            # we need senses and similarities as well as original evidence
            tmp_union_db = db.union(db_copy, mln=self.prac.mln)

            # ignore roles of false ac's
            new_tmp_union_db = tmp_union_db.copy(mln=self.prac.mln)
            roles = self.prac.actioncores[actioncore].roles
            for ac in tmp_union_db.domains['actioncore']:
                if ac == actioncore: continue
                for r in roles:
                    for w in new_tmp_union_db.words():
                        new_tmp_union_db << ('{}({},{})'.format(r, w, ac), 0)

            infstep.indbs.append(new_tmp_union_db)
            # ==============================================================
            # Inference
            # ==============================================================

            infer = self.mlnquery(config=project.queryconf,
                                  verbose=self.prac.verbose > 2,
                                  db=new_tmp_union_db,
                                  mln=mln)
            resultdb = infer.resultdb

            if self.prac.verbose == 2:
                print
                print prac_heading('INFERENCE RESULTS')
                infer.write()

            # ==============================================================
            # Postprocessing
            # ==============================================================
            # get query roles for given actioncore and add inference results
            # for them to final output db. ignore 0-truth results.
            unified_db = new_tmp_union_db.union(resultdb, mln=self.prac.mln)
            #             node.frame.actionroles = defaultdict(list)
            for role, word in unified_db.rolesw(actioncore):
                sense = unified_db.sense(word)
                props = dict(unified_db.properties(word))
                obj = Object(self.prac,
                             id_=word,
                             type_=sense,
                             props=props,
                             syntax=node.pracinfer.buildword(unified_db, word))
                actionroles[role].append(obj)

            # argdoms = kb.query_mln.predicate(role).argdoms
            roles = self.prac.actioncores[actioncore].roles
            new_result = PRACDatabase(self.prac)
            for atom, truth in unified_db.evidence.iteritems():
                if any(r in atom for r in roles):
                    (_, predname,
                     args) = self.prac.mln.logic.parse_literal(atom)
                    if not args[-1] == actioncore:
                        continue
                new_result << (atom, truth)

            for q in unified_db.query('has_sense(?w, ?s)'):
                # TODO: add additional formulas to avoid the use of null values
                if self.prac.verbose > 1:
                    print colorize('  WORD:', (None, 'white', True),
                                   True), q['?w']
                    print colorize('  SENSE:', (None, 'white', True),
                                   True), q['?s']
                    wnmod.printWordSenses(
                        wnmod.get_possible_meanings_of_word(
                            unified_db, q['?w']), q['?s'])
                    print

            infstep.outdbs.append(new_result)

            pngs['Recognizing {} roles - {}'.format(
                'given', str(n))] = get_cond_prob_png(queries,
                                                      infstep.indbs,
                                                      filename=self.name)
            infstep.png = pngs

            if 'project' not in locals():
                raise Exception('no actioncore in database: %s' % olddb)

            infstep.applied_settings = project.queryconf.config
#         pprint(actionroles)
        newframes = splitd(actionroles)
        pred = None
        for newframe in newframes:
            #             pprint(newframe)
            f = Frame(self.prac,
                      node.frame.sidx,
                      node.frame.sentence,
                      syntax=list(olddb.syntax()),
                      words=node.frame.words,
                      actioncore=node.frame.actioncore,
                      actionroles=newframe)
            logger.debug('created new frame %s' % f)
            #             for db in infstep.outdbs:
            #                 out(db.syntax())
            pred = FrameNode(node.pracinfer,
                             f,
                             node,
                             pred,
                             indbs=infstep.outdbs,
                             prevmod=self.name)
            yield pred
Example #15
    def __call__(self, node, **params):

        # ======================================================================
        # Initialization
        # ======================================================================

        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print(prac_heading('Resolving Coreferences'))

        preds = list(
            node.rdfs(
                goaltest=lambda n: isinstance(n, FrameNode) and not n.children,
                all=True))[:2]
        dbs = node.outdbs
        infstep = PRACInferenceStep(node, self)
        projectpath = os.path.join(pracloc.pracmodules, self.name)
        ac = None
        pngs = {}

        #         if not preds: return []
        # ======================================================================
        # Preprocessing
        # ======================================================================

        # merge output dbs from senses_and_roles step, containing
        # roles inferred from multiple sentences.
        if not preds:
            # no coreferencing required - forward dbs and settings
            # from previous module
            infstep.indbs = [db.copy() for db in dbs]
            infstep.outdbs = [db.copy() for db in infstep.indbs]
            logger.debug(
                '%s has no predecessors. Nothing to do here. Passing db...' %
                node)
            return [node]

        # retrieve all words from the dbs to calculate distances.
        # Do not use pracinference.instructions as they are not
        # annotated by the Stanford parser.
        sentences = [db.words() for pred in preds for db in pred.indbs]
        infstep.indbs = [db.copy() for db in dbs]
        #         infstep.outdbs = [db.copy() for db in infstep.indbs]
        # query action core to load corresponding project

        actioncore = node.frame.actioncore
        # clear corefdb and unify current db with the two preceding ones
        corefdb = PRACDatabase(self.prac)
        corefdb = corefdb.union(dbs, self.prac.mln)
        #         for s in range(max(0, i - 2), i+1):
        #             corefdb = corefdb.union(dbs[s], self.prac.mln)
        for pred in preds:
            logger.debug('unifying with %s' % pred)
            for db in pred.indbs:
                corefdb = corefdb.union(db, self.prac.mln)

        # remove all senses from the databases' domain that are not
        # assigned to any word.
        for q in corefdb.query('!(EXIST ?w (has_sense(?w,?sense)))'):
            corefdb.rmval('sense', q['?sense'])
        try:
            # preprocessing: adding distance information for each
            # word in the instructions
            #             s = words[max(0, i - 2):i+1]
            #             snts = list(enumerate(s))
            #             idx = len(snts) - 1  # idx of current sentence
            #             for s in snts[:-1]:
            #                 idx2 = s[0]
            #                 for w in s[1]:
            #                     corefdb << 'distance({},DIST{})'.format(w, idx - idx2)
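            # add a distance(word, DIST<i>) atom for every word that does not
            # have one yet; i is the sentence index, presumably used by the
            # coreference MLN to weight antecedents by distance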
            for sidx, s in enumerate(sentences):
                for w in s:
                    if list(corefdb.query('distance({}, ?w)'.format(w))):
                        continue
                    corefdb << 'distance({},DIST{})'.format(w, sidx)
#                     print 'distance({},DIST{})'.format(w, sidx)

            logger.debug('loading Project: {}'.format(
                colorize(actioncore, (None, 'cyan', True), True)))
            project = MLNProject.open(
                os.path.join(projectpath, '{}.pracmln'.format(actioncore)))
            mlntext = project.mlns.get(project.queryconf['mln'], None)
            mln = parse_mln(mlntext,
                            searchpaths=[self.module_path],
                            projectpath=projectpath,
                            logic=project.queryconf.get('logic', 'FuzzyLogic'),
                            grammar=project.queryconf.get(
                                'grammar', 'PRACGrammar'))
        except MLNParsingError:
            logger.warning(
                'Could not use MLN in project {} for coreference resolution'.
                format(colorize(actioncore, (None, 'cyan', True), True)))
            infstep.outdbs = [db.copy(self.prac.mln) for db in dbs]
            infstep.png = node.parent.laststep.png
            infstep.applied_settings = node.parent.laststep.applied_settings
            return [node]
        except Exception:
            infstep.outdbs = [db.copy(self.prac.mln) for db in dbs]
            infstep.png = node.parent.laststep.png
            infstep.applied_settings = node.parent.laststep.applied_settings
            logger.warning(
                'Could not load project "{}". Passing dbs to next module...'.
                format(actioncore))
            return [node]

        # adding similarities
        wnmod = self.prac.module('wn_senses')
        newdatabase = wnmod.add_sims(corefdb, mln)

        # update queries depending on missing roles
        acroles = filter(lambda role: role != 'action_verb',
                         self.prac.actioncores[actioncore].roles)
        missingroles = [
            ar for ar in acroles
            if len(list(newdatabase.query('{}(?w,{})'.format(ar, actioncore))))
            == 0
        ]
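        # missingroles: roles of the action core (other than action_verb) that
        # have not been assigned to any word yet; only these are queried below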
        conf = project.queryconf
        conf.update({'queries': ','.join(missingroles)})
        print colorize('querying for missing roles {}'.format(conf['queries']),
                       (None, 'green', True), True)

        # asserting impossible role-ac combinations, leaving previously
        # inferred roles untouched
        fulldom = mergedom(mln.domains, newdatabase.domains)
        ac_domains = [dom for dom in fulldom if '_ac' in dom]
        acs = list(set([v for a in ac_domains for v in fulldom[a]]))
        acs = filter(lambda ac_: ac_ != actioncore, acs)
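        # acs: all other action cores occurring in the *_ac domains; role
        # atoms referring to them are asserted false below unless they were
        # inferred previously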

        for ac1 in acs:
            for r in missingroles:
                for w in newdatabase.domains['word']:
                    # words with no sense are asserted false
                    if list(
                            corefdb.query(
                                '!(EXIST ?sense (has_sense({},?sense)))'.
                                format(w))):
                        newdatabase << '!{}({},{})'.format(r, w, actioncore)
                    # leave previously inferred role assignments untouched
                    if list(newdatabase.query('{}({},{})'.format(r, w, ac1))):
                        continue
                    else:
                        newdatabase << '!{}({},{})'.format(r, w, ac1)
        try:
            # ==========================================================
            # Inference
            # ==========================================================
            infer = self.mlnquery(config=conf,
                                  verbose=self.prac.verbose > 2,
                                  db=newdatabase,
                                  mln=mln)
            if self.prac.verbose == 2:
                print
                print prac_heading('INFERENCE RESULTS')
                infer.write()
            # ==========================================================
            # Postprocessing
            # ==========================================================
            # merge initial db with results
            for db in infstep.indbs:
                resultdb = db.copy()
                for res in infer.results.keys():
                    if infer.results[res] != 1.0:
                        continue
                    resultdb << str(res)
                    _, _, args = self.prac.mln.logic.parse_literal(res)
                    w = args[0]
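                    # mirror the word's sense, POS and a reflexive is_a atom
                    # from the merged database into the result db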
                    for q in newdatabase.query(
                            'has_sense({0},?s) ^ has_pos({0},?pos)'.format(w)):
                        resultdb << 'has_sense({},{})'.format(w, q['?s'])
                        resultdb << 'is_a({0},{0})'.format(q['?s'])
                        resultdb << 'has_pos({},{})'.format(w, q['?pos'])
                resultdb = wnmod.add_sims(resultdb, mln)
                # enhance the frame data
                for mrole in missingroles:
                    for q in resultdb.query(
                            '{role}(?w, {actioncore}) ^ has_sense(?w, ?s)'.
                            format(role=mrole, actioncore=actioncore)):
                        for p in preds:
                            if p.frame.object(q['?w']) is not None:
                                node.frame.actionroles[mrole] = p.frame.object(
                                    q['?w'])
                                break
                infstep.outdbs.append(resultdb)
            pprint(node.frame.tojson())
        except NoConstraintsError:
            logger.debug('No coreferences found. Passing db...')
            infstep.outdbs.append(db)
        except Exception:
            logger.error('Something went wrong')
            traceback.print_exc()

        pngs['Coref - ' + str(node)] = get_cond_prob_png(project.queryconf.get(
            'queries', ''),
                                                         dbs,
                                                         filename=self.name)
        infstep.png = pngs
        infstep.applied_settings = project.queryconf.config
        return [node]
Example No. 16
0
    def learn(self, savegeometry=True, options=None, *_):
        if options is None:
            options = {}
        mln_content = self.mln_container.editor.get("1.0", END).strip()
        db_content = self.db_container.editor.get("1.0", END).strip()

        # create conf from current gui settings
        self.update_config()

        # write gui settings
        self.write_gconfig(savegeometry=savegeometry)

        # hide gui
        self.master.withdraw()

        try:
            print((headline('PRAC LEARNING TOOL')))
            print()

            if options.get('mlnarg') is not None:
                mlnobj = MLN(mlnfile=os.path.abspath(options.get('mlnarg')),
                             logic=self.config.get('logic', 'FirstOrderLogic'),
                             grammar=self.config.get('grammar', 'PRACGrammar'))
            else:
                mlnobj = parse_mln(mln_content, searchpaths=[self.project_dir],
                                   projectpath=os.path.join(self.project_dir, self.project.name),
                                   logic=self.config.get('logic', 'FirstOrderLogic'),
                                   grammar=self.config.get('grammar', 'PRACGrammar'))

            if options.get('dbarg') is not None:
                dbobj = Database.load(mlnobj, dbfiles=[options.get('dbarg')], ignore_unknown_preds=self.config.get('ignore_unknown_preds', True))
            else:
                if self.config.get('pattern'):
                    local, dblist = self.get_training_db_paths(self.config.get('pattern').strip())
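                    # local presumably indicates whether the matched training
                    # dbs live inside the project file (True) or on the
                    # filesystem (False)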
                    dbobj = []
                    # build database list from project dbs
                    if local:
                        for dbname in dblist:
                            dbobj.extend(parse_db(mlnobj, self.project.dbs[dbname].strip(),
                                         ignore_unknown_preds=self.config.get('ignore_unknown_preds', True),
                                         projectpath=os.path.join(self.dir, self.project.name)))
                        out(dbobj)
                    # build database list from filesystem dbs
                    else:
                        for dbpath in dblist:
                            dbobj.extend(Database.load(mlnobj, dbpath, ignore_unknown_preds=self.config.get('ignore_unknown_preds', True)))
                # build single db from currently selected db
                else:
                    dbobj = parse_db(mlnobj, db_content, projectpath=os.path.join(self.dir, self.project.name), dirs=[self.dir])

            learning = MLNLearn(config=self.config, mln=mlnobj, db=dbobj)
            result = learning.run()

            # write to file if run from commandline, otherwise save result
            # to project results
            if options.get('outputfile') is not None:
                output = io.StringIO()
                result.write(output)
                with open(os.path.abspath(options.get('outputfile')), 'w') as f:
                    f.write(output.getvalue())
                logger.info('saved result to {}'.format(os.path.abspath(options.get('outputfile'))))
            elif self.save.get():
                output = io.StringIO()
                result.write(output)
                self.project.add_mln(self.output_filename.get(), output.getvalue())
                self.mln_container.update_file_choices()
                self.project.save(dirpath=self.project_dir)
                logger.info('saved result to file mln/{} in project {}'.format(self.output_filename.get(), self.project.name))
            else:
                logger.debug("No output file given - results have not been saved.")
        except:
            traceback.print_exc()

        # restore gui
        sys.stdout.flush()
        self.master.deiconify()
Example No. 17
0
    def __call__(self, node, **params):

        # ======================================================================
        # Initialization
        # ======================================================================

        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print prac_heading('Update roles based on Action Core Refinement')

        dbs = node.outdbs
        infstep = PRACInferenceStep(node, self)
        #         planlist = self.getPlanList()
        #         out(node.parent.frame, '->', node.frame)
        pngs = {}
        for i, db_ in enumerate(dbs):
            #             db = db_.copy()
            #             db = PRACDatabase(self.prac)
            # ==================================================================
            # Preprocessing
            # ==================================================================
            actioncore = node.frame.actioncore
            logger.debug('Action core: {}'.format(actioncore))
            if params.get('project', None) is None:
                logger.debug('Loading Project: {}.pracmln'.format(
                    colorize(actioncore, (None, 'cyan', True), True)))
                projectpath = os.path.join(
                    pracloc.pracmodules, self.name,
                    '{}Transformation.pracmln'.format(actioncore))
                project = MLNProject.open(projectpath)
            else:
                logger.debug(
                    colorize('Loading Project from params',
                             (None, 'cyan', True), True))
                projectpath = os.path.join(
                    params.get('projectpath', None)
                    or os.path.join(pracloc.pracmodules, self.name),
                    params.get('project').name)
                project = params.get('project')

            mlntext = project.mlns.get(project.queryconf['mln'], None)
            mln = parse_mln(
                mlntext,
                searchpaths=[self.module_path],
                projectpath=projectpath,
                logic=project.queryconf.get('logic', 'FirstOrderLogic'),
                grammar=project.queryconf.get('grammar', 'PRACGrammar'))
            result_db = None

            for pdb in node.parent.outdbs:
                db = pdb.copy()
                db = db.union(db_)
                objs = {o.id for o in node.parent.frame.actionroles.values()}
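                # drop all words that do not correspond to a role-filling
                # object of the parent frame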
                for w in set(db.domains['word']):
                    if w not in objs:
                        db.rmval('word', w)
                infstep.indbs.append(db)
                ac = node.parent.frame.actioncore
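                # assert that the parent's action core is achieved by the
                # refined action core and re-assert the parent's role
                # assignments as evidence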
                db << 'achieved_by(%s, %s)' % (ac, actioncore)
                for role, object_ in node.parent.frame.actionroles.iteritems():
                    db << '%s(%s, %s)' % (role, object_.id, ac)
            try:
                # ==========================================================
                # Inference
                # ==========================================================
                infer = self.mlnquery(config=project.queryconf,
                                      db=db,
                                      verbose=self.prac.verbose > 2,
                                      mln=mln)
                result_db = infer.resultdb

                if self.prac.verbose == 2:
                    print
                    print prac_heading('INFERENCE RESULTS')
                    print
                    infer.write()
            except NoConstraintsError:
                logger.error(
                    'no constraints in role transformation: %s -> %s' %
                    (node.parent.frame, node.frame))
                result_db = db

            # ==============================================================
            # Postprocessing
            # ==============================================================
            r_db = PRACDatabase(self.prac)
            roles = self.prac.actioncores[actioncore].roles
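            # keep only role atoms; true atoms referring to the refined action
            # core are additionally turned into Object instances and written
            # back into the frame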
            for atom, truth in sorted(result_db.evidence.iteritems()):
                if any(r in atom for r in roles):
                    _, predname, args = self.prac.mln.logic.parse_literal(atom)
                    word, ac = args
                    if ac == actioncore:
                        r_db << (atom, truth)
                        if truth:
                            sense = pdb.sense(word)
                            props = pdb.properties(word)
                            obj = Object(self.prac,
                                         id_=word,
                                         type_=sense,
                                         props=props,
                                         syntax=node.pracinfer.buildword(
                                             pdb, word))
                            node.frame.actionroles[predname] = obj
#             out('->', node.frame)
            unified_db = db.union(r_db, mln=self.prac.mln)
            r_db_ = PRACDatabase(self.prac)

            # It is assumed that there is only one true action_core
            # predicate per database
            #             for actionverb, actioncore in unified_db.actioncores(): break

            for atom, truth in sorted(unified_db.evidence.iteritems()):
                if 'action_core' in atom: continue
                r_db_ << (atom, truth)
            infstep.outdbs.append(r_db_)

            pngs['RolesTransformation - ' + str(i)] = get_cond_prob_png(
                project.queryconf.get('queries', ''), dbs, filename=self.name)
            infstep.png = pngs
            infstep.applied_settings = project.queryconf.config
        return [node]