def _done_callback(fut):
    """
    Handle completion of a task world's future.

    Logs any exception the world raised, notifies every agent when the
    world closed abnormally, and finally returns each agent to the
    overworld by resetting its active agent.

    NOTE(review): ``world_type``, ``agents``, and ``task_id`` are closure
    variables supplied by the enclosing scope (not visible here).
    """
    err = fut.exception()
    if err is None:
        shared_utils.print_and_log(
            logging.INFO,
            'World {} had no error'.format(world_type),
            should_print=True,
        )
    else:
        shared_utils.print_and_log(
            logging.ERROR,
            'World {} had error {}'.format(world_type, repr(err)),
            should_print=True,
        )
        traceback.print_exc(file=sys.stdout)
        for agent in agents:
            self.observe_message(
                agent.id, 'Sorry, this world closed. Returning to overworld.'
            )
    # mark the world slot as free, then send everyone back to the overworld
    self.active_worlds[task_id] = None
    for agent in agents:
        self.after_agent_removed(agent.id)
        state = self.get_agent_state(agent.id)
        state.set_active_agent(state.get_overworld_agent())
def _world_function():
    """
    Run the overworld loop.

    Repeatedly parleys the overworld; when it yields a world type, runs
    any configured onboarding for the agent, hands the agent to the
    manager's pool, then blocks until the agent returns from its task
    world before resuming the overworld.

    NOTE(review): ``overworld_name``, ``overworld_agent``, ``agent_state``,
    ``onboard_map``, and ``task`` are closure variables from the enclosing
    scope — not defined in this block.

    Returns the last world type produced by the overworld.
    """
    world_generator = utils.get_world_fn_attr(
        self._world_module, overworld_name, "generate_world"
    )
    overworld = world_generator(self.opt, [overworld_agent])
    while not (overworld.episode_done() or self.system_done):
        world_type = overworld.parley()
        if world_type is None:
            # nothing selected yet; poll again shortly
            time.sleep(0.5)
            continue

        # perform onboarding, if this world type has one configured
        onboard_type = onboard_map.get(world_type)
        if onboard_type:
            onboard_id = 'onboard-{}-{}'.format(overworld_agent.id, time.time())
            agent = self.manager._create_agent(onboard_id, overworld_agent.id)
            agent_state.set_active_agent(agent)
            agent_state.assign_agent_to_task(agent, onboard_id)
            _, onboard_data = self._run_world(task, onboard_type, [agent])
            agent_state.onboard_data = onboard_data
        self.manager.add_agent_to_pool(agent_state, world_type)
        utils.print_and_log(logging.INFO, 'onboarding/overworld complete')
        time.sleep(5)

        # sleep until agent returns from task world
        while agent_state.get_active_agent() != overworld_agent:
            time.sleep(2)
        overworld.return_overworld()
    utils.print_and_log(logging.INFO, 'exiting overworld')
    return world_type
def _log_missing_agent(self, agent_id, assignment_id):
    """
    Log the occurrence of a missing agent.
    """
    message = 'Expected to have an agent for {}_{}, yet none was found'.format(
        agent_id, assignment_id
    )
    shared_utils.print_and_log(logging.WARN, message)
def _done_callback(fut):
    """
    Log any exception raised by the task world; re-raise it in debug mode.

    NOTE(review): ``task_id`` is a closure variable from the enclosing scope.
    """
    err = fut.exception()
    if err is None:
        return
    shared_utils.print_and_log(
        logging.ERROR,
        'World {} had error {}'.format(task_id, repr(err)),
        should_print=True,
    )
    if self.debug:
        raise err
def setup_server(self):
    """
    Prepare the Messenger server for handling messages.

    Prompts the operator about the Facebook integration, optionally
    explains the extra requirements for running locally, then provisions
    the webhook server and records its URL in ``self.server_url``.
    No-op when ``self.bypass_server_setup`` is set.
    """
    if self.bypass_server_setup:
        return

    shared_utils.print_and_log(
        logging.INFO,
        '\nYou are going to allow people on Facebook to be agents in '
        'ParlAI.\nDuring this process, Internet connection is required, '
        'and you should turn off your computer\'s auto-sleep '
        'feature.\n',
        should_print=True,
    )
    input('Please press Enter to continue... ')
    shared_utils.print_and_log(logging.NOTSET, '', True)

    if self.opt['local'] is True:
        # fix: message previously misspelled "executing" as "excecuting"
        shared_utils.print_and_log(
            logging.INFO,
            'In order to run the server locally, you will need '
            'to have a public HTTPS endpoint (SSL signed) running on '
            'the server you are currently executing ParlAI on. Enter '
            'that public URL hostname when prompted and ensure that the '
            'port being used by ParlAI (usually 3000) has external '
            'traffic routed to it.',
            should_print=True,
        )
        input('Please press Enter to continue... ')

    shared_utils.print_and_log(
        logging.INFO, 'Setting up Messenger webhook...', should_print=True
    )

    # Setup the server with a task name related to the current task
    task_name = '{}-{}'.format('ParlAI-Messenger', self.opt['task'])
    # sanitize to lowercase alphanumerics and dashes for the server name
    self.server_task_name = ''.join(
        e for e in task_name.lower() if e.isalnum() or e == '-'
    )
    self.server_url = server_utils.setup_server(
        self.server_task_name, local=self.opt['local']
    )
    shared_utils.print_and_log(
        logging.INFO,
        'Webhook address: {}/webhook'.format(self.server_url),
        should_print=True,
    )
def shutdown(self):
    """
    Handle any client shutdown cleanup.

    Best-effort: errors during teardown are logged rather than raised,
    and the remote server is always deleted (unless server setup was
    bypassed), even when an earlier step fails.
    """
    try:
        self.is_running = False
        self.world_runner.shutdown()
        if not self.bypass_server_setup:
            self.socket.keep_running = False
        self._expire_all_conversations()
    except BaseException as err:
        # deliberately broad: shutdown must not propagate teardown errors
        shared_utils.print_and_log(logging.ERROR, f'world ended in error: {err}')
    finally:
        if not self.bypass_server_setup:
            server_utils.delete_server(self.server_task_name, self.opt['local'])
def _done_callback(fut):
    """
    Clean up after an agent's overworld future completes.

    Logs any exception (re-raising in debug mode) or says goodbye to the
    agent, then removes the agent's state from every world pool and drops
    its bookkeeping entries.

    NOTE(review): ``agent_id`` is a closure variable from the enclosing
    scope (not visible here).
    """
    e = fut.exception()
    if e is not None:
        shared_utils.print_and_log(
            logging.ERROR,
            'World {} had error {}'.format(task_id, repr(e)),
            should_print=True,
        )
        if self.debug:
            raise e
    else:
        self.observe_message(agent_id, 'See you later!')
    # fix: agent_id is fixed for this callback, so resolve the state once
    # instead of re-looking it up on every pool iteration
    agent_state = self.get_agent_state(agent_id)
    for world_type in self.agent_pool:
        if agent_state in self.agent_pool[world_type]:
            self.agent_pool[world_type].remove(agent_state)
            self.remove_agent_from_pool(agent_state, world_type=world_type)
    del self.messenger_agent_states[agent_id]
    del self.agent_id_to_overworld_future[agent_id]
def setup_socket(self):
    """
    Set up socket to start communicating to workers.

    Creates the message sender from the app token; when server setup is
    not bypassed, also opens the receiving ChatServiceMessageSocket.
    """
    if not self.bypass_server_setup:
        shared_utils.print_and_log(
            logging.INFO, 'Local: Setting up WebSocket...', should_print=True
        )

    self.app_token = self.get_app_token()
    self.sender = MessageSender(self.app_token)

    # Set up receive
    if not self.bypass_server_setup:
        # skip some hops for local stuff
        socket_use_url = (
            'https://localhost' if self.opt['local'] else self.server_url
        )
        self.socket = ChatServiceMessageSocket(
            socket_use_url, self.port, self._handle_webhook_event
        )
    shared_utils.print_and_log(
        logging.INFO, 'done with websocket', should_print=True
    )
def start_task(self):
    """
    Begin handling task.

    Periodically check to see when enough agents are in the agent pool to
    start an instance of the task. Continue doing this until the desired
    number of conversations is had.
    """
    self.running = True
    while self.running:  # loop forever until the server is shut down
        with self.agent_pool_change_condition:
            valid_pools = self._get_unique_pool()
            for world_type, agent_pool in valid_pools.items():
                # check if agent has exceeded max time in pool
                world_config = self.task_configs[world_type]
                if world_config.max_time_in_pool is not None:
                    self.check_timeout_in_pool(
                        world_type,
                        agent_pool,
                        world_config.max_time_in_pool,
                        world_config.backup_task,
                    )

                needed_agents = self.max_agents_for[world_type]
                if len(agent_pool) < needed_agents:
                    continue

                shared_utils.print_and_log(
                    logging.INFO, 'starting pool', should_print=True
                )
                # enough agents in pool to start new conversation
                self.conversation_index += 1
                task_id = 't_{}'.format(self.conversation_index)

                # Add the required number of valid agents to the conv
                agent_states = agent_pool[:needed_agents]
                agents = []
                for state in agent_states:
                    agent = self._create_agent(task_id, state.get_id())
                    agent.onboard_data = state.onboard_data
                    state.assign_agent_to_task(agent, task_id)
                    state.set_active_agent(agent)
                    agents.append(agent)
                    # reset wait message state
                    state.stored_data['seen_wait_message'] = False

                assign_role_function = shared_utils.get_assign_roles_fn(
                    self.world_module, self.taskworld_map[world_type]
                )
                if assign_role_function is None:
                    assign_role_function = shared_utils.default_assign_roles_fn
                assign_role_function(agents)

                # Allow task creator to filter out workers and run
                # versions of the task that require fewer agents
                agents = [a for a in agents if a.disp_id is not None]
                for a in agents:
                    # Remove selected workers from the agent pool
                    self.remove_agent_from_pool(
                        self.get_agent_state(a.id),
                        world_type=world_type,
                        mark_removed=False,
                    )
                for a in agents:
                    partner_list = agents.copy()
                    partner_list.remove(a)
                    a.message_partners = partner_list

                done_callback = self._get_done_callback_for_agents(
                    task_id, world_type, agents
                )

                # launch task world.
                future = self.world_runner.launch_task_world(
                    task_id, self.taskworld_map[world_type], agents
                )
                future.add_done_callback(done_callback)
                self.active_worlds[task_id] = future

        time.sleep(shared_utils.THREAD_MEDIUM_SLEEP)
def _log_debug(self, text):
    """Emit *text* at DEBUG level, prefixed with the current timestamp."""
    # renamed local from `time` to avoid shadowing the `time` module
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    shared_utils.print_and_log(logging.DEBUG, f'{stamp}: {text}', should_print=True)
def _world_fn():
    """
    Announce and run the named task world, returning its result.

    NOTE(review): ``task_name``, ``task``, ``world_name``, and ``agents``
    are closure variables from the enclosing scope.
    """
    message = 'Starting task {}...'.format(task_name)
    utils.print_and_log(logging.INFO, message)
    return self._run_world(task, world_name, agents)
def _manager_loop_fn(self):
    """
    An iteration of the manager's main loop to launch worlds.

    For every world type whose pool has enough agents, assembles the
    agents, assigns roles, removes them from the pool, and launches the
    task world, registering a done-callback that returns agents to the
    overworld when the world finishes.
    """
    with self.agent_pool_change_condition:
        valid_pools = self._get_unique_pool()
        for world_type, agent_pool in valid_pools.items():
            # check if agent has exceeded max time in pool
            world_config = self.task_configs[world_type]
            if world_config.max_time_in_pool is not None:
                self.check_timeout_in_pool(
                    world_type,
                    agent_pool,
                    world_config.max_time_in_pool,
                    world_config.backup_task,
                )

            needed_agents = self.max_agents_for[world_type]
            if len(agent_pool) < needed_agents:
                continue

            shared_utils.print_and_log(
                logging.INFO, 'starting pool', should_print=True
            )
            # enough agents in pool to start new conversation
            self.conversation_index += 1
            task_id = 't_{}'.format(self.conversation_index)

            # Add the required number of valid agents to the conv
            agent_states = agent_pool[:needed_agents]
            agents = []
            for state in agent_states:
                agent = self._create_agent(task_id, state.get_id())
                agent.onboard_data = state.onboard_data
                state.assign_agent_to_task(agent, task_id)
                state.set_active_agent(agent)
                agents.append(agent)
                # reset wait message state
                state.stored_data['seen_wait_message'] = False

            assign_role_function = shared_utils.get_assign_roles_fn(
                self.world_module, self.taskworld_map[world_type]
            )
            if assign_role_function is None:
                assign_role_function = shared_utils.default_assign_roles_fn
            assign_role_function(agents)

            # Allow task creator to filter out workers and run
            # versions of the task that require fewer agents
            for a in agents:
                # Remove selected workers from the agent pool
                self.remove_agent_from_pool(
                    self.get_agent_state(a.id),
                    world_type=world_type,
                    mark_removed=False,
                )
            for a in agents:
                partner_list = agents.copy()
                partner_list.remove(a)
                a.message_partners = partner_list

            # fix: the callback previously closed over the loop variables
            # world_type/task_id/agents with late binding, so if several
            # worlds launched in one pass, earlier futures' callbacks saw
            # the last iteration's values. Bind them via default args.
            def _done_callback(
                fut, world_type=world_type, task_id=task_id, agents=agents
            ):
                """
                Log and raise exception of task world, if there is one.

                Additionally, set active agent to overworld agent.
                """
                e = fut.exception()
                if e is not None:
                    shared_utils.print_and_log(
                        logging.ERROR,
                        'World {} had error {}'.format(world_type, repr(e)),
                        should_print=True,
                    )
                    traceback.print_exc(file=sys.stdout)
                    for agent in agents:
                        self.observe_message(
                            agent.id,
                            'Sorry, this world closed. Returning to overworld.',
                        )
                else:
                    shared_utils.print_and_log(
                        logging.INFO,
                        'World {} had no error'.format(world_type),
                        should_print=True,
                    )
                self.active_worlds[task_id] = None
                for agent in agents:
                    agent_state = self.get_agent_state(agent.id)
                    agent_state.set_active_agent(agent_state.get_overworld_agent())

            # launch task world.
            future = self.world_runner.launch_task_world(
                task_id, self.taskworld_map[world_type], agents
            )
            future.add_done_callback(_done_callback)
            self.active_worlds[task_id] = future