def __init__(self):
    """Set up the agent with its full set of perceptual and motor modules."""
    super().__init__(output=True)
    # Cognitive and perceptual modules, each registered on this agent.
    self.memory = Memory(self)
    self.vision = Vision(self)
    self.audition = Audition(self)
    # Motor side: the mouse and the typing module share one pair of hands.
    self.hands = Hands(self)
    self.mouse = Mouse(self.hands, self.vision)
    self.typing = Typing(self.hands)
    # Language comprehension is routed through this agent's own interpreter.
    self.language = Language(self)
    self.language.add_interpreter(self.interpret)
def __init__(self):
    """Build an agent that learns spoken instructions and can read and type.

    Wires vision, memory, audition, and typing, then installs a language
    interpreter (speech -> semantic chunks) and an instruction executor
    (semantic chunks -> perception/motor operations).
    """
    super().__init__(output=True)
    self.vision = Vision(self)
    self.memory = Memory(self)
    self.audition = Audition(self)
    self.typing = Typing(Hands(self))

    def parse(words):
        """Turn a heard word sequence into a semantic chunk."""
        verb = words[0]
        if verb == 'done':
            return Item(isa='done')
        if verb == 'read':
            action = Item(isa='action', type='read', object=words[1])
            pointer = self.vision.find(isa='pointer')
            if pointer is not None:
                # Attach the pointer's current location so execution
                # knows where on the display to read from.
                self.vision.encode(pointer)
                action.set('x', pointer.x).set('y', pointer.y)
            return action
        # Any other verb becomes a generic action on its object word.
        return Item(isa='action', type=verb, object=words[1])

    self.language = Language(self)
    self.language.add_interpreter(parse)

    def perform(action, context):
        """Carry out one learned action against the current context."""
        if action.type == 'read':
            where = Query(x=action.x, y=action.y)
            context.set(action.object, self.vision.find_and_encode(where))
        elif action.type == 'type':
            self.typing.type(context.get(action.object))

    self.instruction = Instruction(
        self, self.memory, self.audition, self.language)
    self.instruction.add_executor(perform)
def test_instruction_read(self, output=False):
    """The agent learns a spoken 'solve' procedure and reads two display items.

    Speech teaches a procedure whose 'read' steps capture the pointer's
    location at the moment they are spoken; executing the procedure then
    reads whatever text sits at those locations into the context.
    """
    agent = Agent(output=output)
    memory = Memory(agent)
    env = Environment()
    vision = Vision(agent, env.display)
    audition = Audition(agent, env.speakers)

    def interpreter(words):
        # 'read <name>': remember the pointer's current location on the action.
        if words[0] == 'read':
            sem = Item(isa='action', type='read', object=words[1])
            pointer = vision.find(isa='pointer')
            if pointer is not None:
                vision.encode(pointer)
                sem.set('x', pointer.x).set('y', pointer.y)
            return sem
        elif words[0] == 'done':
            return Item(isa='done')
        else:
            return Item(isa='action', type=words[0], object=words[1])

    language = Language(agent)
    language.add_interpreter(interpreter)

    def executor(action, context):
        # Execute a learned 'read' by encoding whatever is at the stored spot.
        query = Query(x=action.x, y=action.y)
        context.set(action.object, vision.find_and_encode(query))

    instruction = Instruction(agent, memory, audition, language)
    instruction.add_executor(executor)

    equation = ['3', 'x', '/', '12', '=', '15', '/', '4']
    # enumerate() replaces the range(len(...)) anti-idiom.
    for i, symbol in enumerate(equation):
        env.display.add_text(50 + 50 * i, 50, symbol)
    pointer = env.display.add(50, 50, 1, 1, 'pointer', 'pointer')

    # Script: plain strings are spoken as-is; [text, (x, y)] entries move the
    # pointer first so the 'read' step captures that location.
    speech = [
        'to solve',
        ['read a', (50, 50)],
        ['read A', (300, 50)],
        'done'
    ]

    def thread():
        for line in speech:
            agent.wait(3.0)
            if isinstance(line, str):
                audition.add(Aural(isa='speech'), line)
            else:
                audition.add(Aural(isa='speech'), line[0])
                loc = line[1]
                pointer.move(loc[0], loc[1])

    agent.run_thread(thread)

    goal = instruction.listen_and_learn()
    self.assertEqual('solve', goal)

    # Executing the learned procedure reads '3' at (50,50) and '15' at (300,50).
    context = instruction.execute(goal)
    self.assertEqual('3', context.a)
    self.assertEqual('15', context.A)

    agent.wait_for_all()
def test_audition(self, output=False):
    """A single word and a full utterance can both be heard and encoded."""
    agent = Agent(output=output)
    audition = Audition(agent)

    # A word added directly to audition is the same percept we listen for.
    expected = 'Hello'
    percept = Aural('word')
    audition.add(percept, expected)
    listened = audition.listen(isa='word')
    self.assertEqual(percept, listened)
    self.assertEqual(expected, audition.encode(listened))

    # A full utterance comes back word by word, in order, until exhausted.
    text = 'Looks like this is working'
    audition.add_speech(text)
    heard = []
    while (token := audition.listen_and_encode(isa='word')) is not None:
        heard.append(token)
    self.assertEqual(text, ' '.join(heard))

    agent.wait_for_all()
def __init__(self):
    """Create the agent and wire perception, language, and instruction."""
    super().__init__(output=True)
    # Perception and memory.
    self.memory = Memory(self)
    self.vision = Vision(self)
    self.audition = Audition(self)
    # Motor: typing over a fresh pair of hands.
    self.typing = Typing(Hands(self))
    # Language comprehension feeds this agent's own interpret method...
    self.language = Language(self)
    self.language.add_interpreter(self.interpret)
    # ...and instruction following runs through this agent's execute method.
    self.instruction = Instruction(self, self.memory, self.audition,
                                   self.language)
    self.instruction.add_executor(self.execute)
def __init__(self, env, output=True):
    """Attach the agent to *env*'s display and speakers and build its modules.

    Modules are constructed and passed in directly for now, to keep the
    tests fast to run.
    """
    super().__init__(output=output)
    self.memory = OntologyMemory(self, stopOldServer=False,
                                 owlFile='uagent.owl')
    self.vision = Vision(self, env.display)
    self.audition = Audition(self, env.speakers)
    self.motor = Motor(self, self.vision, env)
    # Heard words are joined into a sentence and handed to the ACE interpreter.
    self.interpreter = Interpreter(self.memory)
    self.language = Language(self)
    self.language.add_interpreter(
        lambda words: self.interpreter.interpret_ace(' '.join(words)))
    self.condition_handler = ConditionHandler(self)
    self.action_handler = ActionHandler(self)
def test_audition(self, output=False):
    """Words placed on the environment's speakers can be heard and encoded."""
    agent = Agent(output=output)
    speakers = Environment().speakers
    audition = Audition(agent, speakers)

    # A single word on the speakers is listened for, then encoded back.
    expected = 'Hello'
    speakers.add('word', expected)
    percept = audition.listen(isa='word')
    self.assertEqual(expected, audition.encode(percept))

    # A full utterance is heard back word by word until nothing remains.
    text = 'Looks like this is working'
    speakers.add_speech(text)
    heard = []
    while (token := audition.listen_and_encode(isa='word')) is not None:
        heard.append(token)
    self.assertEqual(text, ' '.join(heard))

    agent.wait_for_all()
def test_instruction_type(self, output=False):
    """The agent learns a spoken 'type' procedure: read a letter, type it."""
    agent = Agent(output=output)
    env = Environment()
    memory = Memory(agent)
    vision = Vision(agent, env.display)
    audition = Audition(agent, env.speakers)
    motor = Motor(agent, vision, env)

    def comprehend(words):
        """Map a heard word sequence onto a semantic chunk."""
        verb = words[0]
        if verb == 'done':
            return Item(isa='done')
        if verb == 'read':
            sem = Item(isa='action', type='read', object=words[1])
            pointer = vision.find(isa='pointer')
            if pointer is not None:
                # Record where the pointer is so execution can read there.
                vision.encode(pointer)
                sem.set('x', pointer.x).set('y', pointer.y)
            return sem
        return Item(isa='action', type=verb, object=words[1])

    language = Language(agent)
    language.add_interpreter(comprehend)

    def perform(action, context):
        """Run one learned action: read from the display, or type a value."""
        if action.type == 'read':
            context.set(action.object,
                        vision.find_and_encode(Query(x=action.x, y=action.y)))
        elif action.type == 'type':
            motor.type(context.get(action.object))

    instruction = Instruction(agent, memory, audition, language)
    instruction.add_executor(perform)

    # Capture every key typed on the environment's keyboard.
    typed = []
    env.keyboard.add_type_fn(typed.append)

    env.display.add_text(50, 50, 'a')
    pointer = env.display.add(50, 50, 1, 1, 'pointer', 'pointer')

    # Plain strings are spoken as-is; [text, (x, y)] moves the pointer first.
    speech = ['to type', ['read letter', (50, 50)], 'type letter', 'done']

    def speak():
        for line in speech:
            agent.wait(3.0)
            if isinstance(line, str):
                audition.add(Aural(isa='speech'), line)
            else:
                audition.add(Aural(isa='speech'), line[0])
                x, y = line[1]
                pointer.move(x, y)

    agent.run_thread(speak)

    goal = instruction.listen_and_learn()
    self.assertEqual('type', goal)

    context = instruction.execute(goal)
    self.assertEqual('a', context.letter)

    agent.wait_for_all()
    self.assertEqual(['a'], typed)
def __init__(self):
    """Create a minimal agent: vision, audition, and typing only."""
    super().__init__(output=True)
    self.vision = Vision(self)
    self.audition = Audition(self)
    # Typing gets its own hands; no mouse is needed for this agent.
    self.typing = Typing(Hands(self))
class OWLUndifferentiatedAgent(Agent):
    """Agent that interprets OWL-style predicate and rule text heard as speech.

    Predicates like ``pred(a,b)`` become memory chunks; rules written as
    ``lhs => rhs`` become a rule chunk with chains of condition and action
    chunks linked through their ``last`` slots. ``run`` listens until a
    'done' chunk is produced, then repeatedly recalls and executes rules
    until the time limit.
    """

    def __init__(self):
        """Initializes the agent"""
        super().__init__(output=True)
        self.memory = Memory(self)
        self.vision = Vision(self)
        self.audition = Audition(self)
        self.hands = Hands(self)
        self.mouse = Mouse(self.hands, self.vision)
        self.typing = Typing(self.hands)
        self.language = Language(self)
        self.language.add_interpreter(self.interpret)
        # Instruction following is disabled for now; this agent executes
        # rules directly via run()/execute().
        # self.instruction = Instruction(
        #     self, self.memory, self.audition, self.language)
        # self.instruction.add_executor(self.execute)

    def _interpret_predicate(self, text, isa='fact', last=None):
        """Parse 'pred(args)' text into a Chunk, store it, and return it.

        One argument yields an 'isa' chunk (subject isa pred); two
        arguments yield a (pred subject object) chunk. Any other arity
        yields None and nothing is stored. When *last* is given, the new
        chunk is linked back to it through its 'last' slot.
        """
        chunk = None
        (pred, args) = text.replace(')', '').split('(')
        args = args.split(',')
        if len(args) == 1:
            chunk = Chunk(isa=isa, predicate='isa', subject=args[0], object=pred)
        elif len(args) == 2:
            chunk = Chunk(isa=isa, predicate=pred, subject=args[0], object=args[1])
        if chunk:
            if last:
                chunk.set('last', last.id)
            self.memory.store(chunk)
        return chunk

    def _interpret_rule(self, text):
        """Parse 'lhs => rhs' into a rule chunk plus linked condition/action chains.

        Each predicate on the left-hand side is stored as a 'condition'
        chunk and each on the right as an 'action' chunk; both chains
        start at the rule chunk and are threaded through the 'last' slots
        so execute() can walk them in order.
        """
        lhs, rhs = text.split('=>')
        pred_pat = re.compile(r'[A-Za-z_-]+\([A-Za-z_,-]*\)')
        rule = Chunk(isa='rule')
        self.memory.store(rule)
        last = rule
        for t in pred_pat.findall(lhs):
            chunk = self._interpret_predicate(t, isa='condition', last=last)
            last = chunk
        last = rule  # restart the chain from the rule for the action side
        for t in pred_pat.findall(rhs):
            chunk = self._interpret_predicate(t, isa='action', last=last)
            last = chunk
        return rule

    def _interpret_owl(self, text):
        """Dispatch raw text (with spaces stripped) to rule or predicate parsing."""
        text = text.replace(' ', '')
        if text.find('=>') >= 0:
            return self._interpret_rule(text)
        else:
            return self._interpret_predicate(text)

    def interpret(self, words):
        """Language-module hook: join heard words and interpret them as OWL text."""
        return self._interpret_owl(''.join(words))

    def _deep_find(self, isa):
        """Find an unseen visual of *isa*, recursing upward via 'isPartOf' facts."""
        visual = self.vision.find(isa=isa, seen=False)
        if visual:
            return visual
        else:
            part_of = self.memory.recall(predicate='isPartOf', object=isa)
            if part_of:
                return self._deep_find(part_of.subject)
            else:
                return None

    def _execute_condition(self, cond, context):
        """Check one condition chunk, stashing what was seen into *context*.

        Only 'appearsIn' conditions are handled: success means the subject
        (or a containing part) was found on screen and encoded, and both
        the raw visual and the encoded object are placed in the context.
        """
        if cond.predicate == 'appearsIn':
            visual = self._deep_find(cond.subject)
            if visual:
                context.set('visual', visual)
                visobj = self.vision.encode(visual)
                context.set(cond.subject, visobj)
                return True
        return False

    def _execute_action(self, action, context):
        """Perform one action chunk; only actions whose subject is 'Subject' act."""
        if action.subject == 'Subject':
            # NOTE(review): leftover debug trace — consider removing.
            print('************** ' + action.predicate)
            if action.predicate == 'click':
                visual = context.get('visual')
                self.mouse.point_and_click(visual)
            elif action.predicate == 'remember':
                pass  # 'remember' is recognized but not implemented yet

    def execute(self, chunk, context):
        """Execute a rule chunk: all conditions must pass, then all actions run.

        Walks the condition chain first; a single failing condition aborts
        with False. Otherwise every action in the chain is performed and
        True is returned. Non-rule chunks fall through (returning None).
        """
        if chunk.isa == 'rule':
            cond = self.memory.recall(isa='condition', last=chunk.id)
            while cond:
                if not self._execute_condition(cond, context):
                    return False
                cond = self.memory.recall(isa='condition', last=cond.id)
            act = self.memory.recall(isa='action', last=chunk.id)
            while act:
                self._execute_action(act, context)
                act = self.memory.recall(isa='action', last=act.id)
            return True

    def run(self, time=300):
        """Listen for instructions until 'done' is heard, then run rules until *time*."""
        context = Item()
        chunk = None
        done = Query(predicate='isa', object='done')
        # Interpret incoming speech until a 'done' chunk is produced.
        while not (chunk and done.matches(chunk)):
            text = self.audition.listen_for_and_encode()
            chunk = self.language.interpret(text)
        # Then repeatedly recall a rule and execute it until time runs out.
        while self.time() < time:
            chunk = self.memory.recall(isa='rule')
            self.execute(chunk, context)
def __init__(self):
    """Build a bare-bones agent with just seeing, hearing, and typing."""
    super().__init__(output=True)
    self.vision = Vision(self)
    self.audition = Audition(self)
    # The typing module owns its hands; no separate mouse is configured.
    self.typing = Typing(Hands(self))