 def recFwdPath(cls,desc,viewCache,ignore=[]):
     """
     >>> R = PomdpAntieSimRecognizer
     >>> R.recFwdPath(Thing(dist='0:', Appear=['OPEN'], value=Path, type='Path'), ViewCache(obs=[(Wall, Empty, Cement, End, Wall, End)]))
     False
     >>> R.recFwdPath(Thing(dist='0:', Appear=['OPEN'], value=Path, type='Path'), ViewCache(obs=[(Wall, Empty, Wall, Fish, Cement, Fish),(Wall, Empty, Cement, End, Wall, End)]))
     True
     >>> R.recFwdPath(Thing(dist='0:', Appear=[Brick], value=Path, type='Path', side=[Front], Structural=[Sides]), ViewCache(obs=[(Wall, Empty, Honeycomb, Fish, Rose, Fish), (Wall, Empty, Wall, Fish, Rose, Fish), (Brick, Hatrack, Brick, Eiffel, Rose, Eiffel), (BlueTile, Empty, BlueTile, End, Wall, End)]))
     True
     
     Test the 'Long' structural descriptor (the longer end of the hallway).
     >>> desc = Thing(dist='0', Part=[Thing(Appear=[Brick], value=Path, type='Path')], value=End, type='Struct', side=[Front], Structural=['Long'])
     >>> viewCache = ViewCache(obs=[(Brick, Empty, Brick, End, Wall, End)])
     >>> R.recFwdPath(desc, viewCache)
     False
     >>> viewCache.update(Right, [(Wall, Empty, Cement, Butterfly, Brick, Butterfly), (Wall, Empty, Wall, End, Wall, End)])
     >>> R.recFwdPath(desc, viewCache)
     False
     >>> viewCache.update(Right, [(Brick, Empty, Brick, Butterfly, Cement, Butterfly), (Wall, Empty, Wall, End, Wall, End)])
     >>> R.recFwdPath(desc, viewCache)
     False
     >>> viewCache.update(Right, [(Wall, Empty, Cement, Butterfly, Brick, Butterfly), (Wood, Empty, Wood, Butterfly, Brick, Butterfly), (Wall, Empty, Wall, End, Wall, End)])
     >>> R.recFwdPath(desc, viewCache)
     True
     """
     #logger.debug('recFwdPath(%r, %r, %r)', desc, viewCache, ignore)
     view = viewCache[Front]
     match = cls.recPathAppear(view,desc,view[Front],ignore)
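     # If the description carries a structural constraint (Long, Short, or
     # Winding) and perspective taking is enabled, compare the length of the
     # forward view against the view behind us to decide whether this end of
     # the hallway fits the description.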
     if (match and #desc.dist != '0' and 
         hasattr(desc,'Structural') and 'Structural' not in ignore and desc.Structural
         and True in [s in Structurals.values() for s in desc.Structural]):
         if not Options.PerspectiveTaking: return match
         length = len(view)
         match = False
         for side in (Back,): #(Left,Back,Right):
             v = viewCache[side]
             if not v or Unknown in v[0].view[0] or len(v)==1: continue
             sides = desc.side
             desc.side = [Sides]
             if cls.recFwdPath(desc,viewCache.rotate(side),ignore+['Structural']):
                 if desc.Structural == [Short]:
                     match = (view == v or length < len(v))
                 elif desc.Structural == [Long]:
                     match = (length > len(v))
                 elif desc.Structural == [Winding]:
                     match = True
                 else: logger.warning('Unknown Structural %r', desc.Structural)
             desc.side = sides
             if match: break
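     # Fallback: a path described by its sides (an intersection landmark) that
     # did not match as a forward hallway may still match as a side path once
     # the structural constraint is dropped.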
     if (not match and Options.IntersectionLandmarks
         and hasattr(desc,'Structural') and 'Structural' not in ignore and Sides in desc.Structural):
         descCp = copy.deepcopy(desc)
         del descCp.Structural
         descCp.side=[Sides]
         match = cls.recSidePath(descCp,viewCache)
     return match # Prob
 def _execute(self,Actions):
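     # Execute each action on the robot in order, logging its result and
     # reporting progress on the NLU queue when one is attached.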
     for i,action in enumerate(Actions):
         logger.info('<%d> %s',i,action)
         if self.NLUQueue: self.NLUQueue.put(('Executing',i))
         try:
             results = action.execute(self.robot)
             logger.info('<%d> %s => %r', i,action,results)
         except Warning,e:
             logger.warning('<%d> %s => %s', i,action,e)
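         # An action raises StopIteration to signal the end of instruction
         # execution; the exception itself is recorded as the result.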
         except StopIteration,e:
             results = e    
             logger.info('<%d> %s => %r', i,action,results)
             logger.info('End of Instruction Execution after <%d>', i)
def extractSurfaceSemantics(token,parent):
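    """Return a surface-semantic label for token: its text, part of speech,
    and comma-separated WordNet sense numbers joined by underscores.

    When the word is not in the tagged Senses dictionary, fall back through a
    direct WordNet lookup, stripping a plural 's', Porter stemming, and
    finally spell-checking.  If no senses can be found, return the uppercased
    token text as a default.
    """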
    global Senses
    POS=getPartOfSpeech(token,parent)
    tokenSenses = {}
    text = token['TEXT'].lower()
    default = token['TEXT'].upper()
    if POS in ['N', 'V', 'ADV', 'ADJ']:
        try: # TODO: restructure this nested fallback cascade as a loop over candidate lookups
            tokenSenses = Senses[text]
        except KeyError:
            logger.warning('extractSurfaceSemantics : Text not in tagged senses: %s', text)
            try: 
                #logger.warning('extractSurfaceSemantics : Previously unseen word but in WordNet?: %s', text)
                # stringified range of possible senses without spaces
                tokenSenses = {POS : range(1,len(pywordnet.getWord(text,POS).getSenses())+1)}
            except KeyError:
                try:
                    logger.warning('extractSurfaceSemantics : Inflected version of WordNet word? %s', text)
                    if text.endswith('s'):
                        text = text[:-1]
                        tokenSenses = Senses[text]
                    else:
                        stemmer = PorterStemmer() # Update WordNetStemmer to NLTK 1.4 API
                        stemmer.stem(token)
                        text = token['STEM']
                        tokenSenses = Senses[text]
                except KeyError:
                    text = token['TEXT'].lower()
                    try:
                        logger.warning('extractSurfaceSemantics : Misspelling / typo of WordNet word? %s', text)
                        spellchecker = enchant.DictWithPWL('en_US', Lexicon)
                        s = ''
                        for s in spellchecker.suggest(text):
                            if s in Senses:
                                tokenSenses = Senses[s]
                                break
                        if not tokenSenses and spellchecker.suggest(text):
                            s = spellchecker.suggest(text)[0]
                            tokenSenses = {POS : range(1,len(pywordnet.getWord(s,POS).getSenses())+1)}
                        if s and Options.Spellcheck:
                            logger.warning('extractSurfaceSemantics : Found spelling correction %s for %s', s,text)
                            text = s
                        #logger.debug('*** extractSurfaceSemantics : Implement spelling correction. *** ')
                        #raise KeyError
                    except KeyError:
                        logger.error('extractSurfaceSemantics : Unknown token: %s', text)
                        return default
        # Handle experienced typos.
        if 'see' in tokenSenses:
            ### FIXME adding to dict for typos that are other words
            text = tokenSenses['see']
            try:
                tokenSenses = Senses[text]
            except: return default
        # Handle morphology variants that wordnet understands.
        elif isinstance(tokenSenses, tuple):
            text,tokenSenses[POS] = tokenSenses[POS]
        try:
            return '_'.join([text,POS,','.join([str(i) for i in tokenSenses[POS]])])
        except KeyError:
            #logger.warning('extractSurfaceSemantics : Expected POS %s for token %s, Got %s, Using %s',
            #            POS, token, tokenSenses.keys(), tokenSenses.keys()[0])
            if tokenSenses.keys():
                POS = token['POS'] = tokenSenses.keys()[0]
                return '_'.join([text,POS,','.join([str(i) for i in tokenSenses.values()[0]])])
        except Exception,e:
            logger.error('extractSurfaceSemantics: %s: Could not find sense %s for token %s',
                      e, POS, token) #tokenSenses, text
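
# A minimal, hypothetical usage sketch (the token dictionary format and the
# contents of Senses below are illustrative assumptions, not taken from the
# corpus):
#
#   token = {'TEXT': 'chairs', 'POS': 'N'}
#   extractSurfaceSemantics(token, parentNode)
#   # => e.g. 'chair_N_1,2' if Senses maps 'chair' to {'N': [1, 2]},
#   #    or 'CHAIRS' if no senses can be found.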