Code example #1
    def __call__(self, node):
        # ======================================================================
        # Initialization
        # ======================================================================
        if not isinstance(node, NLInstruction):
            raise ValueError('Argument must be NLInstruction, got %s' %
                             type(node).__name__)

        if self.prac.verbose > 0:
            print(prac_heading('Parsing %s' % node))
        infstep = PRACInferenceStep(node, self)
        # ======================================================================
        # Preprocessing
        # ======================================================================
        instr = self.compounds(node.instr)
        # ======================================================================
        # Parsing Instructions
        # ======================================================================
        if self.prac.verbose > 0:
            print(
                colorize('Parsing instruction: "%s"',
                         (None, 'white', True), True) % instr)
        dbs = self.parse([instr])
        #-----------------------------------------------------------------------
        # here come some dirty hacks to catch some very frequent and
        # annoying parsing errors:

        # 1. "season" is consequently tagged as a noun. We retag it as a verb
        for db in dbs:
            for q in db.query('has_pos(?w,NN)'):
                if q['?w'].lower().startswith('season'):
                    db['has_pos(%s,NN)' % q['?w']] = 0
                    db['has_pos(%s,VB)' % q['?w']] = 1
        pngs = {}
        for i, db in enumerate(dbs):
            infstep.outdbs.append(db)

            if self.prac.verbose > 1:
                print()
                print(
                    colorize('Syntactic evidence:', (None, 'white', True),
                             True))
                db.write(sys.stdout, True)
                print()
            pngs['NL Parsing - ' + str(i)] = get_cond_prob_png(
                ','.join([x.name for x in self.mln.predicates[:10]]) + ',...',
                str(node.instr),
                filename=self.name)
            infstep.png = pngs
        yield node
Code example #2
    def __call__(self, node, **params):

        # ======================================================================
        # Initialization
        # ======================================================================
        dbs = node.outdbs
        infstep = PRACInferenceStep(node, self)
        infstep.indbs = [db.copy() for db in dbs]
        infstep.outdbs = [db.copy() for db in dbs]

        logger.debug('Running {}'.format(self.name))

        if self.prac.verbose > 0:
            print(prac_heading('Generating CRAM Plan(s)'))

        if not hasattr(self.prac.actioncores[node.frame.actioncore], 'plan'):
            raise ActionKnowledgeError('I don\'t know how to %s' %
                                       node.frame.sentence)
            yield
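            # note: unreachable after the raise; its presence makes __call__ a generator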
        ac = self.prac.actioncores[node.frame.actioncore]
        # fill dictionary with all inferred roles...
        acdict = dict([(k, v.type)
                       for k, v in list(node.frame.actionroles.items())])

        # ..and their properties
        acdict.update(
            dict([('{}_props'.format(k), ' '.join([
                '({} {})'.format(pkey, pval)
                for pkey, pval in list(v.props.tojson().items())
            ])) for k, v in list(node.frame.actionroles.items())]))

        # update dictionary with missing roles and roles properties
        for role in ac.roles:
            if acdict.get(role) is None:
                acdict[role] = 'Unknown'
                acdict['{}_props'.format(role)] = ''

        node.plan = ac.parameterize_plan(**acdict)

        if self.prac.verbose:
            print()
            print(prac_heading('PLAN GENERATION RESULTS'))
            print(colorize('actioncore:', (None, 'white', True), True),
                  colorize(ac.name, (None, 'cyan', True), True))
            print(colorize('assignments:', (None, 'white', True), True))
            for x in acdict:
                print('\t{}: {}'.format(
                    colorize(x, (None, 'white', True), True),
                    colorize(acdict[x], (None, 'cyan', True), True)))
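To make the role dictionary above concrete, here is a hypothetical example of what parameterize_plan() receives. The role names and values are invented; only the key structure ('<role>' and '<role>_props' keys, with 'Unknown' and '' defaults for missing roles) follows the code above.

# Hypothetical shape of acdict (values are illustrative only):
acdict = {
    'action_verb': 'flip.v.08',                    # inferred role -> concept type
    'action_verb_props': '',
    'theme': 'pancake.n.01',
    'theme_props': '(color white) (shape round)',  # "(key value)" pairs joined by spaces
    'instrument': 'Unknown',                       # role missing in the frame
    'instrument_props': '',
}
# node.plan = ac.parameterize_plan(**acdict)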
Code example #3
File: database.py  Project: JIELOGAN/pracmln
 def write(self, stream=sys.stdout, color=None, bars=True):
     '''
     Writes this database into the stream in the MLN Database format.
     The stream must provide a `write()` method as file objects do.
     '''
     if color is None:
         if stream != sys.stdout:
             color = False
         else:
             color = True
     for atom in sorted(self._evidence):
         truth = self._evidence[atom]
         pred, params = self.mln.logic.parse_atom(atom)
         pred = str(pred)
         params = map(str, params)
         if bars:
             bar = barstr(30, truth, color='magenta' if color else None)
         else:
             bar = ''
         if color:
             strout = '%s  %s\n' % (
                 bar if bars else colorize('%.6f' % truth,
                                           (None, 'magenta', False), True),
                 FirstOrderLogic.Lit(False, pred, params,
                                     self.mln).cstr(color))
         else:
             strout = '%s  %s\n' % (bar if bars else '%.6f' % truth,
                                    FirstOrderLogic.Lit(
                                        False, pred, params,
                                        self.mln).cstr(color))
         stream.write(strout)
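A minimal usage sketch for this write() method; db stands for an existing database instance providing the method above (an assumption, it is not constructed here). As the color logic at the top shows, writing to anything other than sys.stdout switches colors off automatically.

import sys

# db: an existing database instance with the write() method above (assumed).
db.write()                        # console: colored, truth values shown as bars
db.write(sys.stdout, bars=False)  # console: colored, numeric truth values instead
with open('evidence.db', 'w') as f:
    db.write(f)                   # file: color is disabled automatically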
Code example #4
def prac_heading(s, upper=True, color='green'):
    '''
    Returns a colorized and formatted string for pretty printing module
    headings.

    :param s:       the string to be formatted
    :param upper:   (bool) whether the string should be converted to
                    uppercase; defaults to True
    :param color:   the color in which the heading should be printed;
                    defaults to 'green'
    :return:        the colorized and formatted string
    '''
    b = colorize('+{}+'.format(''.ljust(len(s) + 2, '=')), (None, color, True),
                 True)
    t = colorize('| {} |'.format(s.upper() if upper else s),
                 (None, color, True), True)
    return '\n{}\n{}\n{}\n'.format(b, t, b)
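A small usage sketch for the helper above; it assumes only that colorize (used inside prac_heading) is available in the same module. The returned string is wrapped in ANSI color codes by colorize; stripped of those codes it looks like the box shown in the comments.

print(prac_heading('Results'))
# +=========+
# | RESULTS |
# +=========+
print(prac_heading('Results', upper=False, color='cyan'))   # keeps the case, cyan box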
Code example #5
def printListAndTick(l, t):
    if type(t) is str:
        t = l.index(t)
    for idx, item in enumerate(l):
        print('    [{}] {}'.format(
            'X' if t == idx else ' ',
            colorize(item, (None, {
                True: 'yellow',
                False: 'white'
            }[t == idx], True), True)))
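A hedged usage example for printListAndTick: the items are arbitrary strings, the ticked entry can be given either by value or by index, and the ticked item is printed in yellow while the others stay white.

printListAndTick(['chop', 'slice', 'dice'], 'slice')
#     [ ] chop
#     [X] slice
#     [ ] dice
printListAndTick(['chop', 'slice', 'dice'], 0)   # ticking by index works as well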
Code example #6
File: base.py  Project: Trixter9994/lazero
 def write(self, stream=sys.stdout, color=None):
     '''
     Writes the MLN to the given stream.
     
     The default stream is `sys.stdout`. In order to print the MLN to the console, a simple
     call of `mln.write()` is sufficient. If color is not specified (i.e. it is None), output
     written to the console is colored, while output to any other stream is uncolored.
     
     :param stream:        the stream to write the MLN to.
     :param color:         whether or not output should be colorized.
     '''
     if color is None:
         if stream != sys.stdout:
             color = False
         else:
             color = True
     if 'learnwts_message' in dir(self):
         stream.write("/*\n%s*/\n\n" % self.learnwts_message)
     # domain declarations
     if self.domain_decls:
         stream.write(
             colorize("// domain declarations\n", comment_color, color))
     for d in self.domain_decls:
         stream.write("%s\n" % d)
     stream.write('\n')
     # variable definitions
     if self.vars:
         stream.write(
             colorize('// variable definitions\n', comment_color, color))
     for var, val in self.vars.iteritems():
         stream.write('%s = %s' % (var, val))
     stream.write('\n')
     stream.write(
         colorize("\n// predicate declarations\n", comment_color, color))
     for predicate in self.iterpreds():
         if isinstance(predicate,
                       FuzzyPredicate) or predicate.name in self.fuzzypreds:
             stream.write('#fuzzy\n')
         stream.write("%s(%s)\n" % (colorize(
             predicate.name, predicate_color, color), predicate.argstr()))
     stream.write(colorize("\n// formulas\n", comment_color, color))
     for idx, formula in self.iterformulas():
         if self._unique_templvars[idx]:
             stream.write('#unique{%s}\n' %
                          ','.join(self._unique_templvars[idx]))
         if formula.weight == HARD:
             stream.write("%s.\n" % fstr(formula.cstr(color)))
         else:
             try:
                 w = colorize("%-10.6f", weight_color, color) % float(
                     eval(str(formula.weight)))
             except:
                 w = colorize(str(formula.weight), weight_color, color)
             stream.write("%s  %s\n" % (w, fstr(formula.cstr(color))))
Code example #7
File: base.py  Project: danielnyga/pracmln
 def write(self, stream=sys.stdout, color=None):
     '''
     Writes the MLN to the given stream.
     
     The default stream is `sys.stdout`. In order to print the MLN to the console, a simple
     call of `mln.write()` is sufficient. If color is not specified (i.e. it is None), output
     written to the console is colored, while output to any other stream is uncolored.
     
     :param stream:        the stream to write the MLN to.
     :param color:         whether or not output should be colorized.
     '''
     if color is None:
         if stream != sys.stdout: 
             color = False
         else: color = True
     if 'learnwts_message' in dir(self):
         stream.write("/*\n%s*/\n\n" % self.learnwts_message)
     # domain declarations
     if self.domain_decls: stream.write(colorize("// domain declarations\n", comment_color, color))
     for d in self.domain_decls: 
         stream.write("%s\n" % d)
     stream.write('\n')
     # variable definitions
     if self.vars: stream.write(colorize('// variable definitions\n', comment_color, color))
     for var, val in self.vars.iteritems():
         stream.write('%s = %s' % (var, val))
     stream.write('\n')
     stream.write(colorize("\n// predicate declarations\n", comment_color, color))
     for predicate in self.iterpreds():
         if isinstance(predicate, FuzzyPredicate) or predicate.name in self.fuzzypreds:
             stream.write('#fuzzy\n')
         stream.write("%s(%s)\n" % (colorize(predicate.name, predicate_color, color), predicate.argstr()))
     stream.write(colorize("\n// formulas\n", comment_color, color))
     for idx, formula in self.iterformulas():
         if self._unique_templvars[idx]:
             stream.write('#unique{%s}\n' % ','.join(self._unique_templvars[idx]))
         if formula.weight == HARD:
             stream.write("%s.\n" % fstr(formula.cstr(color)))
         else:
             try:
                 w = colorize("%-10.6f", weight_color, color) % float(eval(str(formula.weight)))
             except:
                 w = colorize(str(formula.weight), weight_color, color)
             stream.write("%s  %s\n" % (w, fstr(formula.cstr(color))))
Code example #8
File: database.py  Project: Bovril/pracmln
 def write(self, stream=sys.stdout, color=None, bars=True):
     '''
     Writes this database into the stream in the MLN Database format.
     The stream must provide a `write()` method as file objects do.
     '''
     if color is None:
         if stream != sys.stdout: 
             color = False
         else: color = True
     for atom in sorted(self._evidence):
         truth = self._evidence[atom]
         pred, params = self.mln.logic.parse_atom(atom)
         pred = str(pred)
         params = map(str, params)
         if bars:
             bar = barstr(30, truth, color='magenta' if color else None)
         else:
             bar = ''
         if color:
             strout = '%s  %s\n' % (bar if bars else  colorize('%.6f' % truth, (None, 'magenta', False), True), 
                                    FirstOrderLogic.Lit(False, pred, params, self.mln).cstr(color))
         else:
             strout = '%s  %s\n' % (bar if bars else  '%.6f' % truth, FirstOrderLogic.Lit(False, pred, params, self.mln).cstr(color))
         stream.write(strout)
Code example #9
File: wnsenses.py  Project: Trixter9994/lazero
 def printWordSenses(self, synsets, tick):
     '''
     Prints the list of synsets or synset ids and ticks the one specified
     by the given index.
     tick may be the index, the sense id itself, or the Synset
     instance.
     '''
     if type(tick) is str:
         tick = self.prac.wordnet.synset(tick)
     synsets_ = []
     for idx, sense in enumerate(synsets):
         if isinstance(sense, str):
             sense = self.prac.wordnet.synset(sense)
         synsets_.append(sense)
     if isinstance(tick, Synset):
         tick = synsets_.index(tick)
     for idx, sense in enumerate(synsets_):
         print '    [{}] {}: {} ({})'.format(
             'X' if tick == idx else ' ',
             colorize(sense.name(), (None, {
                 True: 'yellow',
                 False: 'white'
             }[tick == idx], True), True), sense.definition(),
             ';'.join(sense.examples()))
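A hypothetical call of printWordSenses; the synset ids are illustrative WordNet-style names, and wnmod stands for the wn_senses module instance that owns the method (both are assumptions, not taken from the example).

# wnmod: the module instance defining printWordSenses above (assumed).
wnmod.printWordSenses(['cut.v.01', 'slice.v.01'], 'cut.v.01')
# each sense is printed with its definition and example sentences;
# the entry matching 'cut.v.01' is ticked with an [X] and shown in yellow.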
Code example #10
File: coref.py  Project: Trixter9994/lazero
    def __call__(self, node, **params):

        # ======================================================================
        # Initialization
        # ======================================================================

        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print prac_heading('Resolving Coreferences')

        preds = list(
            node.rdfs(
                goaltest=lambda n: isinstance(n, FrameNode) and not n.children,
                all=True))[:2]
        dbs = node.outdbs
        infstep = PRACInferenceStep(node, self)
        projectpath = os.path.join(pracloc.pracmodules, self.name)
        ac = None
        pngs = {}

        #         if not preds: return []
        # ======================================================================
        # Preprocessing
        # ======================================================================

        # merge output dbs from senses_and_roles step, containing
        # roles inferred from multiple sentences.
        if not preds:
            # no coreferencing required - forward dbs and settings
            # from previous module
            infstep.indbs = [db.copy() for db in dbs]
            infstep.outdbs = [db.copy() for db in infstep.indbs]
            logger.debug(
                '%s has no predecessors. Nothing to do here. Passing db...' %
                node)
            return [node]

        # retrieve all words from the dbs to calculate distances.
        # Do not use pracinference.instructions as they are not
        # annotated by the Stanford parser.
        sentences = [db.words() for pred in preds for db in pred.indbs]
        infstep.indbs = [db.copy() for db in dbs]
        #         infstep.outdbs = [db.copy() for db in infstep.indbs]
        # query action core to load corresponding project

        actioncore = node.frame.actioncore
        # clear corefdb and unify current db with the two preceding ones
        corefdb = PRACDatabase(self.prac)
        corefdb = corefdb.union(dbs, self.prac.mln)
        #         for s in range(max(0, i - 2), i+1):
        #             corefdb = corefdb.union(dbs[s], self.prac.mln)
        for pred in preds:
            logger.debug('unifying with %s' % pred)
            for db in pred.indbs:
                corefdb = corefdb.union(db, self.prac.mln)

        # remove all senses from the databases' domain that are not
        # assigned to any word.
        for q in corefdb.query('!(EXIST ?w (has_sense(?w,?sense)))'):
            corefdb.rmval('sense', q['?sense'])
        try:
            # preprocessing: adding distance information for each
            # word in the instructions
            #             s = words[max(0, i - 2):i+1]
            #             snts = list(enumerate(s))
            #             idx = len(snts) - 1  # idx of current sentence
            #             for s in snts[:-1]:
            #                 idx2 = s[0]
            #                 for w in s[1]:
            #                     corefdb << 'distance({},DIST{})'.format(w, idx - idx2)
            for sidx, s in enumerate(sentences):
                for w in s:
                    cont = True
                    for q in corefdb.query('distance({}, ?w)'.format(w)):
                        cont = False
                        break
                    if not cont: continue
                    corefdb << 'distance({},DIST{})'.format(w, sidx)
#                     print 'distance({},DIST{})'.format(w, sidx)

            logger.debug('loading Project: {}'.format(
                colorize(actioncore, (None, 'cyan', True), True)))
            project = MLNProject.open(
                os.path.join(projectpath, '{}.pracmln'.format(actioncore)))
            mlntext = project.mlns.get(project.queryconf['mln'], None)
            mln = parse_mln(mlntext,
                            searchpaths=[self.module_path],
                            projectpath=projectpath,
                            logic=project.queryconf.get('logic', 'FuzzyLogic'),
                            grammar=project.queryconf.get(
                                'grammar', 'PRACGrammar'))
        except MLNParsingError:
            logger.warning(
                'Could not use MLN in project {} for coreference resolution'.
                format(colorize(actioncore, (None, 'cyan', True), True)))
            infstep.outdbs = [db.copy(self.prac.mln) for db in dbs]
            infstep.png = node.parent.laststep.png
            infstep.applied_settings = node.parent.laststep.applied_settings
            return [node]
        except Exception:
            infstep.outdbs = [db.copy(self.prac.mln) for db in dbs]
            infstep.png = node.parent.laststep.png
            infstep.applied_settings = node.parent.laststep.applied_settings
            logger.warning(
                'Could not load project "{}". Passing dbs to next module...'.
                format(ac))
            return [node]

        # adding similarities
        wnmod = self.prac.module('wn_senses')
        newdatabase = wnmod.add_sims(corefdb, mln)

        # update queries depending on missing roles
        acroles = filter(lambda role: role != 'action_verb',
                         self.prac.actioncores[actioncore].roles)
        missingroles = [
            ar for ar in acroles
            if len(list(newdatabase.query('{}(?w,{})'.format(ar, actioncore))))
            == 0
        ]
        conf = project.queryconf
        conf.update({'queries': ','.join(missingroles)})
        print colorize('querying for missing roles {}'.format(conf['queries']),
                       (None, 'green', True), True)

        # asserting impossible role-ac combinations, leaving previously
        # inferred roles untouched
        fulldom = mergedom(mln.domains, newdatabase.domains)
        ac_domains = [dom for dom in fulldom if '_ac' in dom]
        acs = list(set([v for a in ac_domains for v in fulldom[a]]))
        acs = filter(lambda ac_: ac_ != actioncore, acs)

        for ac1 in acs:
            for r in missingroles:
                for w in newdatabase.domains['word']:
                    # words with no sense are asserted false
                    if list(
                            corefdb.query(
                                '!(EXIST ?sense (has_sense({},?sense)))'.
                                format(w))):
                        newdatabase << '!{}({},{})'.format(r, w, actioncore)
                    # leave previously inferred information roles
                    # untouched
                    if list(newdatabase.query('{}({},{})'.format(r, w, ac1))):
                        continue
                    else:
                        newdatabase << '!{}({},{})'.format(r, w, ac1)
        try:
            # ==========================================================
            # Inference
            # ==========================================================
            infer = self.mlnquery(config=conf,
                                  verbose=self.prac.verbose > 2,
                                  db=newdatabase,
                                  mln=mln)
            if self.prac.verbose == 2:
                print
                print prac_heading('INFERENCE RESULTS')
                infer.write()
            # ==========================================================
            # Postprocessing
            # ==========================================================
            # merge initial db with results
            for db in infstep.indbs:
                resultdb = db.copy()
                for res in infer.results.keys():
                    if infer.results[res] != 1.0:
                        continue
                    resultdb << str(res)
                    _, _, args = self.prac.mln.logic.parse_literal(res)
                    w = args[0]
                    for q in newdatabase.query(
                            'has_sense({0},?s) ^ has_pos({0},?pos)'.format(w)):
                        resultdb << 'has_sense({},{})'.format(w, q['?s'])
                        resultdb << 'is_a({0},{0})'.format(q['?s'])
                        resultdb << 'has_pos({},{})'.format(w, q['?pos'])
                resultdb = wnmod.add_sims(resultdb, mln)
                # enhance the frame data
                for mrole in missingroles:
                    for q in resultdb.query(
                            '{role}(?w, {actioncore}) ^ has_sense(?w, ?s)'.
                            format(role=mrole, actioncore=actioncore)):
                        for p in preds:
                            if p.frame.object(q['?w']) is not None:
                                node.frame.actionroles[mrole] = p.frame.object(
                                    q['?w'])
                                break
                infstep.outdbs.append(resultdb)
            pprint(node.frame.tojson())
        except NoConstraintsError:
            logger.debug('No coreferences found. Passing db...')
            infstep.outdbs.append(db)
        except Exception:
            logger.error('Something went wrong')
            traceback.print_exc()

        pngs['Coref - ' + str(node)] = get_cond_prob_png(project.queryconf.get(
            'queries', ''),
                                                         dbs,
                                                         filename=self.name)
        infstep.png = pngs
        infstep.applied_settings = project.queryconf.config
        return [node]
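To make the distance bookkeeping above concrete, here is a toy, self-contained illustration of the atoms that the preprocessing loop asserts; the word tokens and sentences are invented for the example.

sentences = [['Start-1', 'the-2', 'centrifuge-3'],   # sentence index 0
             ['Close-1', 'the-2', 'lid-3']]          # sentence index 1
for sidx, s in enumerate(sentences):
    for w in s:
        # in the module above this evidence goes into corefdb instead of stdout
        print('distance({},DIST{})'.format(w, sidx))
# distance(Start-1,DIST0) ... distance(lid-3,DIST1):
# every word carries the index of the sentence it came from.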
Code example #11
    def __call__(self, node, **params):
        # ======================================================================
        # Initialization
        # ======================================================================
        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print(prac_heading('Recognizing Action Cores'))

        if params.get('project', None) is None:
            # load default project
            projectpath = os.path.join(pracloc.pracmodules, self.name,
                                       self.defproject)
            ac_project = MLNProject.open(projectpath)
        else:
            logger.info(
                colorize('Loading Project from params', (None, 'cyan', True),
                         True))
            projectpath = os.path.join(
                params.get('projectpath', None)
                or os.path.join(pracloc.pracmodules, self.name),
                params.get('project').name)
            ac_project = params.get('project')

        dbs = node.outdbs

        mlntext = ac_project.mlns.get(ac_project.queryconf['mln'], None)
        mln = parse_mln(mlntext,
                        searchpaths=[self.module_path],
                        projectpath=projectpath,
                        logic=ac_project.queryconf.get('logic',
                                                       'FirstOrderLogic'),
                        grammar=ac_project.queryconf.get(
                            'grammar', 'PRACGrammar'))
        known_concepts = mln.domains.get('concept', [])
        infstep = PRACInferenceStep(node, self)
        wnmod = self.prac.module('wn_senses')

        pngs = {}
        nlinstr = node.nlinstr()
        sidx = nlinstr.idx()
        sentence = nlinstr.instr

        for db_ in dbs:
            # ==================================================================
            # Preprocessing
            # ==================================================================
            db = wnmod.get_senses_and_similarities(db_, known_concepts)
            tmp_union_db = db.union(db_, mln=self.prac.mln)
            infstep.indbs.append(tmp_union_db)

            # ==================================================================
            # Inference
            # ==================================================================
            infer = self.mlnquery(config=ac_project.queryconf,
                                  verbose=self.prac.verbose > 2,
                                  db=tmp_union_db,
                                  mln=mln)
            resultdb = infer.resultdb
            if self.prac.verbose == 2:
                print()
                print(prac_heading('INFERENCE RESULTS'))
                infer.write()
            # ==================================================================
            # Postprocessing
            # ==================================================================
            unified_db = resultdb.union(tmp_union_db, mln=self.prac.mln)

            #             infstep.outdbs
            infstep.outdbs.extend(
                self.extract_multiple_action_cores(self.prac, unified_db,
                                                   wnmod, known_concepts))

            pngs[unified_db.domains.get('actioncore',
                                        [None])[0]] = get_cond_prob_png(
                                            ac_project.queryconf.get(
                                                'queries', ''),
                                            dbs,
                                            filename=self.name)
        infstep.png = pngs
        infstep.applied_settings = ac_project.queryconf.config
        pred = None
        for outdb in infstep.outdbs:
            #             out('in ac rec:')
            #             for w, ac in outdb.actioncores():
            #                 out(w, ac)
            for frame in node.pracinfer.buildframes(outdb, sidx, sentence):
                node_ = FrameNode(node.pracinfer,
                                  frame,
                                  node,
                                  pred,
                                  indbs=[outdb],
                                  prevmod=self.name)
                pred = node_
                yield node_
                break
            else:
                logger.error('no actioncore recognized in %s' % node)
                raise Exception('no actioncore recognized in %s' % node)
Code example #12
    def __call__(self, node, **params):

        # ======================================================================
        # Initialization
        # ======================================================================

        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print(prac_heading('Refining Actioncores'))
        dbs = node.outdbs
        infstep = PRACInferenceStep(node, self)
        #         if node.previous_module == 'achieved_by':
        #             raise ActionKnowledgeError('I don\'t know how to %s' % node.frame.sentence)
        # ======================================================================
        # Preprocessing
        # ======================================================================
        for olddb in dbs:
            infstep.indbs.append(olddb.copy())
            # To handle multiple action cores in one task, we have to check
            # whether the individual dbs contain achieved_by atoms that
            # already represent plans.
            pngs = {}
            actioncore = node.frame.actioncore
            mod = self.prac.module('complex_achieved_by')
            newnodes = list(mod(node))
            n = None
            parentframes = [
                p.frame for p in node.parentspath()
                if isinstance(p, FrameNode)
            ]
            if any(n.frame in parentframes for n in newnodes):
                logger.error(
                    'aborting reasoning because of infinite loop. (%s)' %
                    node.frame)
                node.children = []
            else:
                for n in newnodes:
                    yield n
            if n is not None: return
            if n is None:
                # This list is used to avoid an infinite loop during the
                # achieved_by inference: it contains the pracmlns that have
                # already been used during the process. Every pracmln should
                # be used only once, because the evidence for the inference
                # always stays the same; if a pracmln did not infer a plan
                # the first time, it never will.

                # Need to remove possible achieved_by predicates from
                # previous achieved_by inferences
                db_ = PRACDatabase(self.prac)
                for atom, truth in sorted(olddb.evidence.items()):
                    if 'achieved_by' in atom: continue
                    db_ << (atom, truth)
                if params.get('project', None) is None:
                    logger.debug('Loading Project: {}.pracmln'.format(
                        colorize(actioncore, (None, 'cyan', True), True)))
                    projectpath = os.path.join(pracloc.pracmodules, self.name,
                                               '{}.pracmln'.format(actioncore))
                    if os.path.exists(projectpath):
                        project = MLNProject.open(projectpath)
                    else:
                        infstep.outdbs.append(olddb)
                        logger.error(actioncore + ".pracmln does not exist.")
                        return
                else:
                    logger.debug(
                        colorize('Loading Project from params',
                                 (None, 'cyan', True), True))
                    projectpath = os.path.join(
                        params.get('projectpath', None)
                        or os.path.join(pracloc.pracmodules, self.name),
                        params.get('project').name)
                    project = params.get('project')

                mlntext = project.mlns.get(project.queryconf['mln'], None)
                mln = parse_mln(
                    mlntext,
                    searchpaths=[self.module_path],
                    projectpath=projectpath,
                    logic=project.queryconf.get('logic', 'FirstOrderLogic'),
                    grammar=project.queryconf.get('grammar', 'PRACGrammar'))
                known_concepts = mln.domains.get('concept', [])
                wnmod = self.prac.module('wn_senses')

                # Merge the domains of the db and the given MLN to avoid errors
                # caused by role inference and the resulting missing fuzzy
                # predicates.
                known_concepts = list(
                    set(known_concepts).union(
                        set(db_.domains.get('concept', []))))
                db = wnmod.get_senses_and_similarities(db_, known_concepts)

                unified_db = db_.union(db)
                dbnew = wnmod.add_sims(unified_db, unified_db)

                # Inference achieved_by predicate
                db_ = self.extendDBWithAchievedByEvidence(
                    dbnew, mln, actioncore)
                # ==============================================================
                # Inference
                # ==============================================================
                #                 db_.write()
                try:
                    infer = self.mlnquery(config=project.queryconf,
                                          verbose=self.prac.verbose > 2,
                                          db=db_,
                                          mln=mln)
                except NoConstraintsError:
                    logger.error(
                        'achieved_by inference failed due to NoConstraintsError: %s'
                        % node.frame)
                    return
                result_db = infer.resultdb

                if self.prac.verbose == 2:
                    print()
                    print(prac_heading('INFERENCE RESULTS'))
                    infer.write()
                # ==============================================================
                # Postprocessing
                # ==============================================================
                # unified_db = result_db.union(kb.query_mln, db_)
                # only add inferred achieved_by atoms, leave out
                # 0-evidence atoms
                for qa in result_db.query('achieved_by(?ac1,?ac2)'):
                    if qa['?ac2'] == 'Complex': continue
                    unified_db << 'achieved_by({},{})'.format(
                        qa['?ac1'], qa['?ac2'])
                    pngs[qa['?ac2']] = get_cond_prob_png(project.queryconf.get(
                        'queries', ''),
                                                         dbs,
                                                         filename=self.name)
                    newframe = Frame(self.prac,
                                     node.frame.sidx,
                                     '',
                                     words=[],
                                     syntax=[],
                                     actioncore=qa['?ac2'],
                                     actionroles={})
                    #                     out('->', newframe)
                    infstep.outdbs.append(unified_db)
                    yield FrameNode(node.pracinfer,
                                    newframe,
                                    node,
                                    pred=None,
                                    indbs=[unified_db],
                                    prevmod=self.name)
                    return
                infstep.outdbs.append(unified_db)


#             raise ActionKnowledgeError('I don\'t know how to %s' % node.frame.sentence)
Code example #13
File: pracobjrec.py  Project: Trixter9994/lazero
def main():
    headline("Running main...")

    usage = 'PRAC Object Recognition tool'
    parser = argparse.ArgumentParser(description=usage)
    parser.add_argument(
        "-i",
        "--interactive",
        dest="interactive",
        default=False,
        action='store_true',
        help="Starts PRAC object recognition with an interactive "
        "GUI tool.")
    parser.add_argument(
        "-t",
        "--train",
        dest="trainMLN",
        nargs=1,
        default=None,
        help=
        "Train given MLN with inference results from argument. Example: pracobjrec -t orange.n.01 'It is a yellow "
        "or orange fruit.'")
    parser.add_argument(
        "-r",
        "--regular",
        dest="regular",
        default=False,
        action='store_true',
        help="Runs regular inference pipeline. Arguments: mlnName")
    parser.add_argument("-f",
                        "--onthefly",
                        dest="onthefly",
                        default=False,
                        action='store_true',
                        help="Generate MLN on the fly")
    parser.add_argument(
        "-m",
        "--mln",
        nargs=2,
        dest='mln',
        default=None,
        help="Runs regular inference pipeline. Arguments: mlnName")

    args = parser.parse_args()
    opts_ = vars(args)

    interactive = args.interactive
    regular = args.regular
    sentences = args

    prac = PRAC()
    prac.wordnet = WordNet(concepts=None)
    infer = PRACInference(prac, sentences)

    # in case we have natural-language parameters, parse them
    if len(infer.instructions) > 0:
        parser = prac.module('nl_parsing')
        prac.run(infer, parser)

    if interactive:  # use the GUI
        logger.info('Entering interactive mode')
        gui = PRACQueryGUI(infer)
        gui.open()
    elif args.trainMLN:  # training with property inference output
        logger.info(
            'Training MLN {} with result from property inference'.format(
                args.trainMLN))

        # property inference from parsed input
        propExtract = prac.module('prop_extraction')
        prac.run(infer, propExtract)

        objRecog = prac.module('obj_recognition')

        praclearn = PRACLearning(prac)
        praclearn.otherParams['mln'] = args.mln[0]
        praclearn.otherParams['logic'] = args.mln[1]
        praclearn.otherParams['concept'] = args.trainMLN
        praclearn.otherParams['onthefly'] = args.onthefly
        praclearn.training_dbs = infer.inference_steps[-1].output_dbs

        objRecog.train(praclearn)
        sys.exit(0)

    else:  # regular PRAC pipeline
        logger.info('Entering regular inference pipeline')

        # property inference from parsed input
        propExtract = prac.module('prop_extraction')
        prac.run(infer, propExtract)

        objRecog = prac.module('obj_recognition')

        # object inference based on inferred properties
        prac.run(infer, objRecog)

    step = infer.inference_steps[-1]
    print()
    print(prac_heading('PRAC INFERENCE RESULTS'))
    print()
    print('Object description: {}'.format(
        colorize(''.join(sentences), (None, 'white', True), True)))
    print()
    for db in step.output_dbs:
        print('Inferred properties:')
        for ek in sorted(db.evidence):
            e = db.evidence[ek]
            if e == 1.0 and any(
                    ek.startswith(p) for p in [
                        'color', 'size', 'shape', 'hypernym', 'hasa',
                        'dimension', 'consistency', 'material'
                    ]):
                print('{}({}, {})'.format(
                    colorize(ek.split('(')[0], (None, 'white', True), True),
                    colorize(
                        ek.split('(')[1].split(',')[0],
                        (None, 'magenta', True), True),
                    colorize(
                        ek.split('(')[1].split(',')[1], (None, 'green', True),
                        True)))

    for db in step.output_dbs:
        print()
        print('Inferred possible concepts:')
        for ek in sorted(db.evidence, key=db.evidence.get, reverse=True):
            e = db.evidence[ek]
            if e > 0.001 and ek.startswith('object'):
                print('{} {}({}, {})'.format(
                    colorize('{:.4f}'.format(e), (None, 'cyan', True), True),
                    colorize('object', (None, 'white', True), True),
                    colorize(
                        ek.split(',')[0].split('(')[1],
                        (None, 'magenta', True), True),
                    colorize(
                        ek.split(',')[1].split(')')[0], (None, 'yellow', True),
                        True)))
Code example #14
    def __call__(self, node, **params):

        # ======================================================================
        # Initialization
        # ======================================================================

        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print prac_heading('Update roles based on Action Core Refinement')

        dbs = node.outdbs
        infstep = PRACInferenceStep(node, self)
        #         planlist = self.getPlanList()
        #         out(node.parent.frame, '->', node.frame)
        pngs = {}
        for i, db_ in enumerate(dbs):
            #             db = db_.copy()
            #             db = PRACDatabase(self.prac)
            # ==================================================================
            # Preprocessing
            # ==================================================================
            actioncore = node.frame.actioncore
            logger.debug('Action core: {}'.format(actioncore))
            if params.get('project', None) is None:
                logger.debug('Loading Project: {}.pracmln'.format(
                    colorize(actioncore, (None, 'cyan', True), True)))
                projectpath = os.path.join(
                    pracloc.pracmodules, self.name,
                    '{}Transformation.pracmln'.format(actioncore))
                project = MLNProject.open(projectpath)
            else:
                logger.debug(
                    colorize('Loading Project from params',
                             (None, 'cyan', True), True))
                projectpath = os.path.join(
                    params.get('projectpath', None)
                    or os.path.join(pracloc.pracmodules, self.name),
                    params.get('project').name)
                project = params.get('project')

            mlntext = project.mlns.get(project.queryconf['mln'], None)
            mln = parse_mln(
                mlntext,
                searchpaths=[self.module_path],
                projectpath=projectpath,
                logic=project.queryconf.get('logic', 'FirstOrderLogic'),
                grammar=project.queryconf.get('grammar', 'PRACGrammar'))
            result_db = None

            for pdb in node.parent.outdbs:
                db = pdb.copy()
                db = db.union(db_)
                objs = {o.id for o in node.parent.frame.actionroles.values()}
                for w in set(db.domains['word']):
                    if w not in objs:
                        db.rmval('word', w)
                infstep.indbs.append(db)
                ac = node.parent.frame.actioncore
                db << 'achieved_by(%s, %s)' % (ac, actioncore)
                for role, object_ in node.parent.frame.actionroles.iteritems():
                    db << '%s(%s, %s)' % (role, object_.id, ac)
            try:
                # ==========================================================
                # Inference
                # ==========================================================
                infer = self.mlnquery(config=project.queryconf,
                                      db=db,
                                      verbose=self.prac.verbose > 2,
                                      mln=mln)
                result_db = infer.resultdb

                if self.prac.verbose == 2:
                    print
                    print prac_heading('INFERENCE RESULTS')
                    print
                    infer.write()
            except NoConstraintsError:
                logger.error(
                    'no constraints in role transformation: %s -> %s' %
                    (node.parent.frame, node.frame))
                result_db = db

            # ==============================================================
            # Postprocessing
            # ==============================================================
            r_db = PRACDatabase(self.prac)
            roles = self.prac.actioncores[actioncore].roles
            for atom, truth in sorted(result_db.evidence.iteritems()):
                if any(r in atom for r in roles):
                    _, predname, args = self.prac.mln.logic.parse_literal(atom)
                    word, ac = args
                    if ac == actioncore:
                        r_db << (atom, truth)
                        if truth:
                            sense = pdb.sense(word)
                            props = pdb.properties(word)
                            obj = Object(self.prac,
                                         id_=word,
                                         type_=sense,
                                         props=props,
                                         syntax=node.pracinfer.buildword(
                                             pdb, word))
                            node.frame.actionroles[predname] = obj
#             out('->', node.frame)
            unified_db = db.union(r_db, mln=self.prac.mln)
            r_db_ = PRACDatabase(self.prac)

            # It is assumed that there is only one true action_core
            # predicate per database.
            #             for actionverb, actioncore in unified_db.actioncores(): break

            for atom, truth in sorted(unified_db.evidence.iteritems()):
                if 'action_core' in atom: continue
                r_db_ << (atom, truth)
            infstep.outdbs.append(r_db_)

            pngs['RolesTransformation - ' + str(i)] = get_cond_prob_png(
                project.queryconf.get('queries', ''), dbs, filename=self.name)
            infstep.png = pngs
            infstep.applied_settings = project.queryconf.config
        return [node]
Code example #15
    def __call__(self, pracinference, **params):

        # ======================================================================
        # Initialization
        # ======================================================================

        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print(prac_heading('Recognizing Control Structures'))

        if params.get('project', None) is None:
            # load default project
            projectpath = os.path.join(pracloc.pracmodules, self.name,
                                       self.defproject)
            ac_project = MLNProject.open(projectpath)
        else:
            logger.info(
                colorize('Loading Project from params', (None, 'cyan', True),
                         True))
            projectpath = os.path.join(
                params.get('projectpath', None)
                or os.path.join(pracloc.pracmodules, self.name),
                params.get('project').name)
            ac_project = params.get('project')

        dbs = pracinference.inference_steps[-1].output_dbs

        mlntext = ac_project.mlns.get(ac_project.queryconf['mln'], None)
        mln = parse_mln(mlntext,
                        searchpaths=[self.module_path],
                        projectpath=projectpath,
                        logic=ac_project.queryconf.get('logic',
                                                       'FirstOrderLogic'),
                        grammar=ac_project.queryconf.get(
                            'grammar', 'PRACGrammar'))
        inf_step = PRACInferenceStep(pracinference, self)

        pngs = {}
        for i, db in enumerate(dbs):
            db_ = db.copy()

            # ======================================================================
            # Inference
            # ======================================================================

            infer = self.mlnquery(config=ac_project.queryconf, db=db, mln=mln)
            result_db = infer.resultdb

            if self.prac.verbose == 2:
                print()
                print(prac_heading('INFERENCE RESULTS'))
                infer.write()

            # ==========================================================
            # Postprocessing
            # ==========================================================

            for q in result_db.query('event(?w,?ac)'):
                db_ << 'event({},{})'.format(q['?w'], q['?ac'])
            for q in result_db.query('condition(?w)'):
                db_ << 'condition({})'.format(q['?w'])

            inf_step.output_dbs.append(db_)
            pngs['CS' + str(i)] = get_cond_prob_png(ac_project.queryconf.get(
                'queries', ''),
                                                    dbs,
                                                    filename=self.name)
            inf_step.png = pngs

        inf_step.applied_settings = ac_project.queryconf.config

        return inf_step
Code example #16
 def _run(self):
     """
     verbose: whether to print results (or anything at all, in fact)
     details: (given that verbose is true) whether to output additional
              status information
     debug:   (given that verbose is true) if true, outputs debug
              information, in particular the distribution over possible
              worlds
     debugLevel: level of detail for debug mode
     """
     # check consistency with hard constraints:
     self._watch.tag('check hard constraints', verbose=self.verbose)
     hcgrounder = FastConjunctionGrounding(self.mrf, simplify=False, unsatfailure=True, 
                                           formulas=[f for f in self.mrf.formulas if f.weight == HARD], 
                                           **(self._params + {'multicore': False, 'verbose': False}))
     for gf in hcgrounder.itergroundings():
         if isinstance(gf, Logic.TrueFalse) and gf.truth() == .0:
             raise SatisfiabilityException('MLN is unsatisfiable due to hard constraint violation by evidence: {} ({})'.format(str(gf), str(self.mln.formula(gf.idx))))
     self._watch.finish('check hard constraints')
     # compute number of possible worlds
     worlds = 1
     for variable in self.mrf.variables:
         values = variable.valuecount(self.mrf.evidence)
         worlds *= values
     numerators = [0.0 for i in range(len(self.queries))]
     denominator = 0.
     # start summing
     logger.debug("Summing over %d possible worlds..." % worlds)
     if worlds > 500000 and self.verbose:
         print colorize('!!! %d WORLDS WILL BE ENUMERATED !!!' % worlds, (None, 'red', True), True)
     k = 0
     self._watch.tag('enumerating worlds', verbose=self.verbose)
     global global_enumAsk
     global_enumAsk = self
     bar = None
     if self.verbose:
         bar = ProgressBar(width=100, steps=worlds, color='green')
     if self.multicore:
         pool = Pool()
         logger.debug('Using multiprocessing on {} core(s)...'.format(pool._processes))
         try:
             for num, denum in pool.imap(with_tracing(eval_queries), self.mrf.worlds()):
                 denominator += denum
                 k += 1
                 for i, v in enumerate(num):
                     numerators[i] += v
                 if self.verbose: bar.inc()
         except Exception as e:
             logger.error('Error in child process. Terminating pool...')
             pool.close()
             raise e
         finally:
             pool.terminate()
             pool.join()
     else:  # do it single core
         for world in self.mrf.worlds():
             # compute exp. sum of weights for this world
             num, denom = eval_queries(world)
             denominator += denom
             for i, _ in enumerate(self.queries):
                 numerators[i] += num[i]
             k += 1
             if self.verbose:
                 bar.update(float(k) / worlds)
     logger.debug("%d worlds enumerated" % k)
     self._watch.finish('enumerating worlds')
     if 'grounding' in self.grounder.watch.tags:
         self._watch.tags['grounding'] = self.grounder.watch['grounding']
     if denominator == 0:
         raise SatisfiabilityException(
             'MLN is unsatisfiable. All probability masses returned 0.')
     # normalize answers
     dist = map(lambda x: float(x) / denominator, numerators)
     result = {}
     for q, p in zip(self.queries, dist):
         result[str(q)] = p
     return result
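The core of the method above is a weighted count over possible worlds: each world contributes exp(sum of satisfied formula weights) to the denominator and to the numerator of every query it satisfies, and the answer is the ratio. The following toy sketch of that normalization is independent of the pracmln API; world generation and formula evaluation are stubbed out as caller-supplied functions, which is an assumption for illustration only.

import math

def enumeration_ask(worlds, queries, weight_of, satisfies):
    """Toy exact inference by enumeration (not the pracmln implementation).

    worlds:    iterable of possible worlds (any representation)
    queries:   list of query atoms/formulas
    weight_of: world -> sum of weights of the formulas satisfied in it
    satisfies: (world, query) -> bool
    """
    numerators = [0.0] * len(queries)
    denominator = 0.0
    for world in worlds:
        score = math.exp(weight_of(world))   # unnormalized world probability
        denominator += score
        for i, q in enumerate(queries):
            if satisfies(world, q):
                numerators[i] += score
    if denominator == 0:
        raise ValueError('unsatisfiable: all probability mass is zero')
    return {str(q): n / denominator for n, q in zip(numerators, queries)}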
Code example #17
    def __call__(self, node, **params):
        # ======================================================================
        # Initialization
        # ======================================================================
        logger.debug('inference on {}'.format(self.name))

        if self.prac.verbose > 0:
            print prac_heading('Recognizing {} Roles'.format({
                True: 'MISSING',
                False: 'GIVEN'
            }[params.get('missing', False)]))

        dbs = node.outdbs
        infstep = PRACInferenceStep(node, self)
        queries = ''
        wnmod = self.prac.module('wn_senses')
        actionroles = defaultdict(list)
        pngs = {}
        for n, olddb in enumerate(dbs):
            db_copy = olddb.copy(mln=self.prac.mln)
            actioncore = node.frame.actioncore
            logger.debug(actioncore)
            if params.get('project', None) is None:
                logger.debug('Loading Project: {}.pracmln'.format(
                    colorize(actioncore, (None, 'cyan', True), True)))
                projectpath = os.path.join(pracloc.pracmodules, self.name,
                                           '{}.pracmln'.format(actioncore))
                project = MLNProject.open(projectpath)
            else:
                logger.debug(
                    colorize('Loading Project from params',
                             (None, 'cyan', True), True))
                projectpath = os.path.join(
                    params.get('projectpath', None)
                    or os.path.join(pracloc.pracmodules, self.name),
                    params.get('project').name)
                project = params.get('project')

            queries = project.queryconf.get('queries', '')
            mlntext = project.mlns.get(project.queryconf['mln'], None)
            mln = parse_mln(mlntext,
                            searchpaths=[self.module_path],
                            projectpath=projectpath,
                            logic=project.queryconf.get('logic', 'FuzzyLogic'),
                            grammar=project.queryconf.get(
                                'grammar', 'PRACGrammar'))
            known_concepts = mln.domains.get('concept', [])

            # ==============================================================
            # Preprocessing
            # ==============================================================
            # adding senses and similarities. might be obsolete as it has
            # already been performed in ac recognition
            logger.debug('adding senses. concepts={}'.format(known_concepts))
            db = wnmod.get_senses_and_similarities(db_copy, known_concepts)

            # we need senses and similarities as well as original evidence
            tmp_union_db = db.union(db_copy, mln=self.prac.mln)

            # ignore roles of false ac's
            new_tmp_union_db = tmp_union_db.copy(mln=self.prac.mln)
            roles = self.prac.actioncores[actioncore].roles
            for ac in tmp_union_db.domains['actioncore']:
                if ac == actioncore: continue
                for r in roles:
                    for w in new_tmp_union_db.words():
                        new_tmp_union_db << ('{}({},{})'.format(r, w, ac), 0)

            infstep.indbs.append(new_tmp_union_db)
            # ==============================================================
            # Inference
            # ==============================================================

            infer = self.mlnquery(config=project.queryconf,
                                  verbose=self.prac.verbose > 2,
                                  db=new_tmp_union_db,
                                  mln=mln)
            resultdb = infer.resultdb

            if self.prac.verbose == 2:
                print
                print prac_heading('INFERENCE RESULTS')
                infer.write()

            # ==============================================================
            # Postprocessing
            # ==============================================================
            # get query roles for given actioncore and add inference results
            # for them to final output db. ignore 0-truth results.
            unified_db = new_tmp_union_db.union(resultdb, mln=self.prac.mln)
            #             node.frame.actionroles = defaultdict(list)
            for role, word in unified_db.rolesw(actioncore):
                sense = unified_db.sense(word)
                props = dict(unified_db.properties(word))
                obj = Object(self.prac,
                             id_=word,
                             type_=sense,
                             props=props,
                             syntax=node.pracinfer.buildword(unified_db, word))
                actionroles[role].append(obj)

            # argdoms = kb.query_mln.predicate(role).argdoms
            roles = self.prac.actioncores[actioncore].roles
            new_result = PRACDatabase(self.prac)
            for atom, truth in unified_db.evidence.iteritems():
                if any(r in atom for r in roles):
                    (_, predname,
                     args) = self.prac.mln.logic.parse_literal(atom)
                    if not args[-1] == actioncore:
                        continue
                new_result << (atom, truth)

            for q in unified_db.query('has_sense(?w, ?s)'):
                # TODO: add additional formulas to avoid the use of null values
                if self.prac.verbose > 1:
                    print colorize('  WORD:', (None, 'white', True),
                                   True), q['?w']
                    print colorize('  SENSE:', (None, 'white', True),
                                   True), q['?s']
                    wnmod.printWordSenses(
                        wnmod.get_possible_meanings_of_word(
                            unified_db, q['?w']), q['?s'])
                    print

            infstep.outdbs.append(new_result)

            pngs['Recognizing {} roles - {}'.format(
                'given', str(n))] = get_cond_prob_png(queries,
                                                      infstep.indbs,
                                                      filename=self.name)
            infstep.png = pngs

            if 'project' not in locals():
                raise Exception('no actioncore in database: %s' % olddb)

            infstep.applied_settings = project.queryconf.config
#         pprint(actionroles)
        newframes = splitd(actionroles)
        pred = None
        for newframe in newframes:
            #             pprint(newframe)
            f = Frame(self.prac,
                      node.frame.sidx,
                      node.frame.sentence,
                      syntax=list(olddb.syntax()),
                      words=node.frame.words,
                      actioncore=node.frame.actioncore,
                      actionroles=newframe)
            logger.debug('created new frame %s' % f)
            #             for db in infstep.outdbs:
            #                 out(db.syntax())
            pred = FrameNode(node.pracinfer,
                             f,
                             node,
                             pred,
                             indbs=infstep.outdbs,
                             prevmod=self.name)
            yield pred