def _test_turn_annotations_static_task(
    self,
    task_data_path: str,
    overrides: List[str],
    data_regression: DataRegressionFixture,
    config_name: str = 'example',
):
    """
    Test the static turn annotations task under specific conditions.

    Pass in parameters that will change depending on how we're testing the
    static turn annotations task.
    """

    # Load the .json of the task data
    with open(task_data_path) as f:
        task_data = json.load(f)

    # Setup
    build_task(task_directory=TASK_DIRECTORY)
    self._set_up_config(
        task_directory=TASK_DIRECTORY,
        overrides=overrides,
        config_name=config_name,
    )

    # Set up the operator and server
    self._set_up_server()

    self._test_agent_state(task_data=task_data, data_regression=data_regression)
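# A minimal sketch of how a test might invoke the helper above, assuming the
# pytest-regressions `data_regression` fixture is available and a task-data
# JSON is checked in next to the task. The file name and override below are
# illustrative assumptions, not the task's real values; config_name falls
# back to its 'example' default.
def test_base_task(self, data_regression: DataRegressionFixture):
    task_data_path = os.path.join(TASK_DIRECTORY, 'task_config', 'task_data.json')
    self._test_turn_annotations_static_task(
        task_data_path=task_data_path,
        overrides=['mephisto.blueprint.subtasks_per_unit=2'],  # assumed override
        data_regression=data_regression,
    )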
def run_static_task(cfg: DictConfig, task_directory: str):
    """
    Run static task, given configuration.
    """
    db, cfg = load_db_and_process_config(cfg)
    print(f'\nHydra config:\n{OmegaConf.to_yaml(cfg)}')
    random.seed(42)
    task_name = cfg.mephisto.task.get('task_name', 'turn_annotations_static')
    soft_block_qual_name = cfg.mephisto.blueprint.get(
        'block_qualification', f'{task_name}_block'
    )
    # Default to a task-specific name to avoid soft-block collisions
    soft_block_mturk_workers(cfg=cfg, db=db, soft_block_qual_name=soft_block_qual_name)
    build_task(task_directory)

    operator = Operator(db)
    operator.validate_and_run_config(run_config=cfg.mephisto, shared_state=None)
    operator.wait_for_runs_then_shutdown(
        skip_input=True, log_rate=cfg.monitoring_log_rate
    )
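# A minimal sketch of a Hydra entry point that could drive run_static_task()
# above, assuming a hydra_configs/ directory containing a config named
# 'example' and a TASK_DIRECTORY constant pointing at the task folder; both
# are assumptions about the surrounding run script.
import hydra

@hydra.main(config_path='hydra_configs', config_name='example')
def run(cfg: DictConfig) -> None:
    run_static_task(cfg=cfg, task_directory=TASK_DIRECTORY)

if __name__ == '__main__':
    run()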
def test_base_task(self):

    # Paths
    expected_states_folder = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'expected_states'
    )
    expected_state_path = os.path.join(expected_states_folder, 'state.json')

    # Setup
    build_task(task_directory=TASK_DIRECTORY)

    # Set up the config and database
    overrides = [
        'mephisto.blueprint.num_conversations=1',
        'mephisto.task.allowed_concurrent=0',
        '+turn_timeout=300',
    ]
    # TODO: remove all of these params once Hydra 1.1 is released with
    # support for recursive defaults
    self._set_up_config(
        blueprint_type=BLUEPRINT_TYPE,
        task_directory=TASK_DIRECTORY,
        overrides=overrides,
    )

    # Set up the operator and server
    teacher = get_teacher(self.config)
    world_opt = {"turn_timeout": self.config.turn_timeout, "teacher": teacher}
    shared_state = SharedParlAITaskState(
        world_opt=world_opt, onboarding_world_opt=world_opt
    )
    self._set_up_server(shared_state=shared_state)

    # Check that the agent states are as they should be
    with open(expected_state_path) as f:
        expected_state = json.load(f)
    self._test_agent_states(
        num_agents=1,
        agent_display_ids=AGENT_DISPLAY_IDS,
        agent_messages=AGENT_MESSAGES,
        form_messages=FORM_MESSAGES,
        form_task_data=FORM_TASK_DATA,
        expected_states=(expected_state,),
    )
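# A minimal sketch of the module-level fixtures the test above assumes. Real
# values would mirror the conversation recorded in expected_states/state.json;
# every value here is illustrative only.
AGENT_DISPLAY_IDS = ('Chat Agent 1',)  # one display ID per agent
AGENT_MESSAGES = [('Hi, how are you?',), ('Doing well, thanks!',)]  # one tuple per turn
FORM_MESSAGES = ('How much did you enjoy this conversation?',)  # final survey prompt
FORM_TASK_DATA = ({'form_responses': [{'question': 'enjoy', 'response': '4'}]},)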
def _test_turn_annotations_static_task(
    self,
    blueprint_type: str,
    task_data_path: str,
    overrides: List[str],
    data_regression: DataRegressionFixture,
):
    """
    Test the static turn annotations task under specific conditions.

    Pass in parameters that will change depending on how we're testing the
    static turn annotations task.
    """

    # Load the .json of the task data
    with open(task_data_path) as f:
        task_data = json.load(f)

    # Setup
    build_task(task_directory=TASK_DIRECTORY)

    # Set up the config and database
    overrides += [
        '+mephisto.blueprint.conversation_count=null',
        'mephisto.blueprint.onboarding_qualification=null',
        '+mephisto.blueprint.random_seed=42',
        '+mephisto.task.assignment_duration_in_seconds=1800',
    ]
    # TODO: remove all of these params once Hydra 1.1 is released with support
    # for recursive defaults
    # TODO: test onboarding as well, and don't nullify the
    # onboarding_qualification param
    self._set_up_config(
        blueprint_type=blueprint_type,
        task_directory=TASK_DIRECTORY,
        overrides=overrides,
    )

    # Set up the operator and server
    self._set_up_server()

    self._test_agent_state(task_data=task_data, data_regression=data_regression)
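# A minimal sketch of exercising the helper above with an explicit blueprint
# type, again assuming the pytest-regressions fixture. The blueprint-type
# string, file name, and override below are illustrative assumptions.
def test_in_flight_qa(self, data_regression: DataRegressionFixture):
    task_data_path = os.path.join(TASK_DIRECTORY, 'task_config', 'task_data.json')
    self._test_turn_annotations_static_task(
        blueprint_type='turn_annotations_static_inflight_qa_blueprint',  # assumed
        task_data_path=task_data_path,
        overrides=['mephisto.blueprint.annotate_last_utterance_only=true'],  # assumed
        data_regression=data_regression,
    )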
def main(cfg: DictConfig) -> None:
    db, cfg = load_db_and_process_config(cfg)

    teacher = get_teacher(cfg)
    world_opt = {"turn_timeout": cfg.turn_timeout, "teacher": teacher}

    # Build the frontend bundle if a custom one is configured but not yet built
    custom_bundle_path = cfg.mephisto.blueprint.get("custom_source_bundle", None)
    if custom_bundle_path is not None:
        if not os.path.exists(custom_bundle_path):
            build_task(TASK_DIRECTORY)

    # Share the world options with both the main and onboarding worlds
    shared_state = SharedParlAITaskState(
        world_opt=world_opt, onboarding_world_opt=world_opt
    )

    operator = Operator(db)
    operator.validate_and_run_config(run_config=cfg.mephisto, shared_state=shared_state)
    operator.wait_for_runs_then_shutdown(
        skip_input=True, log_rate=cfg.monitoring_log_rate
    )
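# A minimal sketch of the Hydra wiring that main() above assumes: a script
# config registered with Mephisto, with main() carrying the hydra.main
# decorator. The base class, defaults list, and config name are assumptions
# about the surrounding run script, not confirmed values.
from dataclasses import dataclass, field
from typing import Any, List

from mephisto.operations.hydra_config import RunScriptConfig, register_script_config

defaults = [{'conf': 'example'}]  # assumed defaults list

@dataclass
class ScriptConfig(RunScriptConfig):
    defaults: List[Any] = field(default_factory=lambda: defaults)
    task_dir: str = TASK_DIRECTORY
    turn_timeout: int = 300  # seconds to wait for a human response

register_script_config(name='scriptconfig', module=ScriptConfig)

# main() above would then be decorated with:
#     @hydra.main(config_path='hydra_configs', config_name='scriptconfig')
if __name__ == '__main__':
    main()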
def test_base_task(self):

    # Paths
    expected_states_folder = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'expected_states'
    )
    expected_state_path = os.path.join(expected_states_folder, 'state.json')

    # Setup
    build_task(task_directory=TASK_DIRECTORY)

    # Set up the config and database
    overrides = ['+turn_timeout=300']
    self._set_up_config(task_directory=TASK_DIRECTORY, overrides=overrides)

    # Set up the operator and server
    teacher = get_teacher(self.config)
    world_opt = {"turn_timeout": self.config.turn_timeout, "teacher": teacher}
    shared_state = SharedParlAITaskState(
        world_opt=world_opt, onboarding_world_opt=world_opt
    )
    self._set_up_server(shared_state=shared_state)

    # Check that the agent states are as they should be
    with open(expected_state_path) as f:
        expected_state = json.load(f)
    self._test_agent_states(
        num_agents=1,
        agent_display_ids=AGENT_DISPLAY_IDS,
        agent_messages=AGENT_MESSAGES,
        form_messages=FORM_MESSAGES,
        form_task_data=FORM_TASK_DATA,
        expected_states=(expected_state,),
    )
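# A minimal sketch of the get_teacher() helper these tests and run scripts
# rely on: it builds a ParlAI teacher from the config so the chat world can
# serve fixed conversation context. The config field and fallback task name
# are illustrative assumptions.
from parlai.core.params import ParlaiParser
from parlai.core.teachers import create_task_agent_from_taskname

def get_teacher(cfg: DictConfig):
    task_name = cfg.get('base_task', 'blended_skill_talk')  # assumed field
    parser = ParlaiParser(add_parlai_args=True, add_model_args=False)
    opt = parser.parse_args(args=['--task', task_name])
    return create_task_agent_from_taskname(opt)[0]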