Example 1
def main(cfg: DictConfig) -> None:
    db, cfg = load_db_and_process_config(cfg)
    world_opt = get_world_opt(cfg)
    onboarding_world_opt = get_onboarding_world_opt(cfg)
    shared_state = SharedParlAITaskState(
        world_opt=world_opt, onboarding_world_opt=onboarding_world_opt)

    check_role_training_qualification(
        db=db,
        qname=world_opt[constants.ROLE_QUALIFICATION_NAME_KEY],
        requester_name=cfg.mephisto.provider.requester_name,
    )

    shared_state.task_config['minTurns'] = world_opt['min_turns']
    shared_state.task_config['onboardingPersona'] = constants.ONBOARDING_PERSONA
    shared_state.worker_can_do_unit = get_worker_eval_function(
        world_opt[constants.ROLE_QUALIFICATION_NAME_KEY],
        onboarding_world_opt['onboarding_qualification'],
    )

    banned_words_fpath = cfg.mephisto.blueprint.banned_words_file
    add_banned_words_frontend_conf(shared_state, banned_words_fpath)

    operator = Operator(db)
    operator.validate_and_run_config(cfg.mephisto, shared_state)
    operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=300)
    update_persona_use_counts_file(cfg.mephisto.blueprint.persona_counts_file,
                                   world_opt['prev_persona_count'])
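The `worker_can_do_unit` hook assigned above receives a Mephisto worker and a unit and returns whether that worker may claim the unit. A minimal sketch of what `get_worker_eval_function` might return, keyed on the two qualification names seen above; the policy here is an assumption for illustration, not ParlAI's actual implementation:

def get_worker_eval_function(role_qual_name, onboarding_qual_name):
    """Sketch of a factory for SharedTaskState.worker_can_do_unit hooks."""
    def worker_eval_function(worker, unit) -> bool:
        # Workers who explicitly failed onboarding are never eligible
        # (Worker.is_disqualified is a Mephisto Worker method; the overall
        # eligibility policy here is assumed).
        if worker.is_disqualified(onboarding_qual_name):
            return False
        # Any role-balancing logic keyed on role_qual_name would go here;
        # by default, let the worker take the unit.
        return True
    return worker_eval_function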
Example 2
def main(cfg: DictConfig) -> None:
    db, cfg = load_db_and_process_config(cfg)

    parser = ParlaiParser(True, False)
    opt = parser.parse_args(
        list(chain.from_iterable(
            ('--' + k, v) for k, v in cfg.teacher.items())))
    agent = RepeatLabelAgent(opt)
    teacher = create_task(opt, agent).get_task_agent()

    world_opt = {"turn_timeout": cfg.turn_timeout, "teacher": teacher}

    custom_bundle_path = cfg.mephisto.blueprint.get("custom_source_bundle",
                                                    None)
    if custom_bundle_path is not None:
        assert os.path.exists(custom_bundle_path), (
            "Must build the custom bundle with `npm install; npm run dev` from within "
            f"the {TASK_DIRECTORY}/webapp directory in order to demo a custom bundle "
        )
        world_opt["send_task_data"] = True

    shared_state = SharedParlAITaskState(world_opt=world_opt,
                                         onboarding_world_opt=world_opt)

    operator = Operator(db)

    operator.validate_and_run_config(cfg.mephisto, shared_state)
    operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)
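The `chain.from_iterable` expression above flattens the `cfg.teacher` mapping into the flag/value list that `ParlaiParser.parse_args` expects. A standalone illustration, with hypothetical config values:

from itertools import chain

# Hypothetical cfg.teacher contents; the keys and values are examples only.
teacher_cfg = {'task': 'blended_skill_talk', 'datatype': 'valid'}
args = list(chain.from_iterable(('--' + k, v) for k, v in teacher_cfg.items()))
print(args)  # ['--task', 'blended_skill_talk', '--datatype', 'valid']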
Example 3
        def test_base_task(self):

            # # Setup

            # Set up the config and database
            overrides = [
                '++mephisto.task.allowed_concurrent=0',
                '++mephisto.task.assignment_duration_in_seconds=600',
                '++mephisto.task.max_num_concurrent_units=0',
                '++mephisto.task.maximum_units_per_worker=0',
                '++num_turns=3',
                '++turn_timeout=300',
            ]
            self._set_up_config(task_directory=TASK_DIRECTORY,
                                overrides=overrides)

            # Set up the operator and server
            world_opt = {
                "num_turns": self.config.num_turns,
                "turn_timeout": self.config.turn_timeout,
            }
            shared_state = SharedParlAITaskState(
                world_opt=world_opt, onboarding_world_opt=world_opt)
            self._set_up_server(shared_state=shared_state)

            # Check that the agent states are as they should be
            self._test_agent_states(
                num_agents=2,
                agent_display_ids=AGENT_DISPLAY_IDS,
                agent_messages=AGENT_MESSAGES,
                form_messages=FORM_MESSAGES,
                form_task_data=FORM_TASK_DATA,
                expected_states=EXPECTED_STATES,
            )
Example 4
        def test_base_task(self):

            # Paths
            expected_states_folder = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), 'expected_states')
            expected_state_path = os.path.join(expected_states_folder,
                                               'state.json')

            # # Setup

            build_task(task_directory=TASK_DIRECTORY)

            # Set up the config and database
            overrides = [
                'mephisto.blueprint.num_conversations=1',
                'mephisto.task.allowed_concurrent=0',
                '+turn_timeout=300',
            ]
            # TODO: remove all of these params once Hydra 1.1 is released with
            #  support for recursive defaults
            self._set_up_config(
                blueprint_type=BLUEPRINT_TYPE,
                task_directory=TASK_DIRECTORY,
                overrides=overrides,
            )

            # Set up the operator and server
            teacher = get_teacher(self.config)
            world_opt = {
                "turn_timeout": self.config.turn_timeout,
                "teacher": teacher
            }
            shared_state = SharedParlAITaskState(
                world_opt=world_opt, onboarding_world_opt=world_opt)
            self._set_up_server(shared_state=shared_state)

            # Check that the agent states are as they should be
            with open(expected_state_path) as f:
                expected_state = json.load(f)
            self._test_agent_states(
                num_agents=1,
                agent_display_ids=AGENT_DISPLAY_IDS,
                agent_messages=AGENT_MESSAGES,
                form_messages=FORM_MESSAGES,
                form_task_data=FORM_TASK_DATA,
                expected_states=(expected_state,),
            )
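Note that `expected_states` is a one-element tuple, matching `num_agents=1`. If the `expected_states/state.json` fixture ever needs regenerating, a sketch of a helper that writes a captured state back to the path the test reads (the dump format is an assumption):

import json
import os

def save_expected_state(state: dict, folder: str) -> None:
    # Write a captured agent state to the fixture consumed by the test above.
    os.makedirs(folder, exist_ok=True)
    with open(os.path.join(folder, 'state.json'), 'w') as f:
        json.dump(state, f, indent=2)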
Example 5
def main(operator: "Operator", cfg: DictConfig) -> None:

    world_opt = {"num_turns": cfg.num_turns, "turn_timeout": cfg.turn_timeout}

    custom_bundle_path = cfg.mephisto.blueprint.get("custom_source_bundle",
                                                    None)
    if custom_bundle_path is not None:
        assert os.path.exists(custom_bundle_path), (
            "Must build the custom bundle with `npm install; npm run dev` from within "
            f"the {cfg.task_dir}/webapp directory in order to demo a custom bundle "
        )
        world_opt["send_task_data"] = True

    shared_state = SharedParlAITaskState(world_opt=world_opt,
                                         onboarding_world_opt=world_opt)

    operator.launch_task_run(cfg.mephisto, shared_state)
    operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)
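Unlike the other entry points, this `main` takes the `Operator` as an argument and uses the newer `launch_task_run` API rather than `validate_and_run_config`. A hypothetical caller, with module paths as found in recent Mephisto releases:

from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.operations.operator import Operator

def run(cfg: DictConfig) -> None:
    # Build the Operator against the local Mephisto database and hand it
    # to main() above; cfg is assumed to be an already-processed Hydra config.
    db = LocalMephistoDB()
    operator = Operator(db)
    main(operator, cfg)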
Example 6
        def test_base_task(self):

            # # Setup

            # Set up the config and database
            overrides = [
                '+mephisto.blueprint.world_file=${task_dir}/demo_worlds.py',
                '+mephisto.blueprint.task_description_file=${task_dir}/task_description.html',
                '+mephisto.blueprint.num_conversations=1',
                '+mephisto.task.allowed_concurrent=0',
                '+mephisto.task.assignment_duration_in_seconds=600',
                '+mephisto.task.max_num_concurrent_units=0',
                '+mephisto.task.maximum_units_per_worker=0',
                '+num_turns=3',
                '+turn_timeout=300',
            ]
            # TODO: remove all of these params once Hydra 1.1 is released with support
            #  for recursive defaults
            self._set_up_config(
                blueprint_type=BLUEPRINT_TYPE,
                task_directory=TASK_DIRECTORY,
                overrides=overrides,
            )

            # Set up the operator and server
            world_opt = {
                "num_turns": self.config.num_turns,
                "turn_timeout": self.config.turn_timeout,
            }
            shared_state = SharedParlAITaskState(
                world_opt=world_opt, onboarding_world_opt=world_opt
            )
            self._set_up_server(shared_state=shared_state)

            # Check that the agent states are as they should be
            self._test_agent_states(
                num_agents=2,
                agent_display_ids=AGENT_DISPLAY_IDS,
                agent_messages=AGENT_MESSAGES,
                form_messages=FORM_MESSAGES,
                form_task_data=FORM_TASK_DATA,
                expected_states=EXPECTED_STATES,
            )
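These overrides rely on Hydra's prefix semantics: `+key=value` appends a key that is absent from the config, while `++key=value` (as used in Example 3) adds the key or overrides an existing value. A minimal sketch using Hydra's compose API, assuming a `conf/example.yaml` exists:

from hydra import compose, initialize

with initialize(config_path='conf'):
    cfg = compose(
        config_name='example',
        overrides=['+num_turns=3', '++turn_timeout=300'],
    )
    print(cfg.num_turns, cfg.turn_timeout)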
Example 7
def main(cfg: DictConfig) -> None:
    db, cfg = load_db_and_process_config(cfg)

    teacher = get_teacher(cfg)
    world_opt = {"turn_timeout": cfg.turn_timeout, "teacher": teacher}

    custom_bundle_path = cfg.mephisto.blueprint.get("custom_source_bundle",
                                                    None)
    if custom_bundle_path is not None:
        if not os.path.exists(custom_bundle_path):
            build_task(TASK_DIRECTORY)

    shared_state = SharedParlAITaskState(world_opt=world_opt,
                                         onboarding_world_opt=world_opt)

    operator = Operator(db)
    operator.validate_and_run_config(run_config=cfg.mephisto,
                                     shared_state=shared_state)
    operator.wait_for_runs_then_shutdown(skip_input=True,
                                         log_rate=cfg.monitoring_log_rate)
Example 8
def main(cfg: DictConfig) -> None:
    db, cfg = load_db_and_process_config(cfg)

    world_opt = {"num_turns": cfg.num_turns, "turn_timeout": cfg.turn_timeout}

    custom_bundle_path = cfg.mephisto.blueprint.get("custom_source_bundle",
                                                    None)
    if custom_bundle_path is not None:
        assert os.path.exists(custom_bundle_path), (
            "Must build the custom bundle with `npm install; npm run dev` from within "
            f"the {TASK_DIRECTORY}/webapp directory in order to demo a custom bundle "
        )
        world_opt["send_task_data"] = True

    shared_state = SharedParlAITaskState(world_opt=world_opt,
                                         onboarding_world_opt=world_opt)

    operator = Operator(db)

    operator.validate_and_run_config(cfg.mephisto, shared_state)
    operator.wait_for_runs_then_shutdown(skip_input=True,
                                         log_rate=cfg.monitoring_log_rate)
Example 9
        def test_base_task(self):

            # Paths
            expected_states_folder = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), 'expected_states')
            expected_state_path = os.path.join(expected_states_folder,
                                               'state.json')

            # # Setup

            build_task(task_directory=TASK_DIRECTORY)

            # Set up the config and database
            overrides = ['+turn_timeout=300']

            self._set_up_config(task_directory=TASK_DIRECTORY,
                                overrides=overrides)

            # Set up the operator and server
            teacher = get_teacher(self.config)
            world_opt = {
                "turn_timeout": self.config.turn_timeout,
                "teacher": teacher
            }
            shared_state = SharedParlAITaskState(
                world_opt=world_opt, onboarding_world_opt=world_opt)
            self._set_up_server(shared_state=shared_state)

            # Check that the agent states are as they should be
            with open(expected_state_path) as f:
                expected_state = json.load(f)
            self._test_agent_states(
                num_agents=1,
                agent_display_ids=AGENT_DISPLAY_IDS,
                agent_messages=AGENT_MESSAGES,
                form_messages=FORM_MESSAGES,
                form_task_data=FORM_TASK_DATA,
                expected_states=(expected_state,),
            )
Example 10
        def test_base_task(self):

            # # Setup

            # Set up the config and database
            overrides = [
                '+mephisto.blueprint.world_file=${task_dir}/demo_worlds.py',
                '+mephisto.blueprint.task_description_file=${task_dir}/task_description.html',
                '+mephisto.blueprint.num_conversations=1',
                '+mephisto.task.allowed_concurrent=0',
                '+num_turns=3',
                '+turn_timeout=300',
            ]
            # TODO: remove all of these params once Hydra 1.1 is released with support
            #  for recursive defaults
            self._set_up_config(
                blueprint_type=BLUEPRINT_TYPE,
                task_directory=TASK_DIRECTORY,
                overrides=overrides,
            )

            # Set up the operator and server
            world_opt = {
                "num_turns": self.config.num_turns,
                "turn_timeout": self.config.turn_timeout,
            }
            shared_state = SharedParlAITaskState(
                world_opt=world_opt, onboarding_world_opt=world_opt
            )
            self._set_up_server(shared_state=shared_state)

            # Set up the mock human agents
            agent_0_id, agent_1_id = self._register_mock_agents(num_agents=2)

            # # Feed messages to the agents

            # Set initial data
            self.server.request_init_data(agent_0_id)
            self.server.request_init_data(agent_1_id)

            # Have agents talk to each other
            for agent_0_text, agent_1_text in AGENT_MESSAGES:
                self._send_agent_message(
                    agent_id=agent_0_id,
                    agent_display_id=AGENT_0_DISPLAY_ID,
                    text=agent_0_text,
                )
                self._send_agent_message(
                    agent_id=agent_1_id,
                    agent_display_id=AGENT_1_DISPLAY_ID,
                    text=agent_1_text,
                )

            # Have agents fill out the form
            self.server.send_agent_act(
                agent_id=agent_0_id,
                act_content={
                    'text': FORM_PROMPTS['agent_0'],
                    'task_data': {'form_responses': FORM_RESPONSES['agent_0']},
                    'id': AGENT_0_DISPLAY_ID,
                    'episode_done': False,
                },
            )
            self.server.send_agent_act(
                agent_id=agent_1_id,
                act_content={
                    'text': FORM_PROMPTS['agent_1'],
                    'task_data': {'form_responses': FORM_RESPONSES['agent_1']},
                    'id': AGENT_1_DISPLAY_ID,
                    'episode_done': False,
                },
            )

            # Submit the HIT
            self.server.send_agent_act(
                agent_id=agent_0_id,
                act_content={
                    'task_data': {'final_data': {}},
                    'MEPHISTO_is_submit': True,
                },
            )
            self.server.send_agent_act(
                agent_id=agent_1_id,
                act_content={
                    'task_data': {'final_data': {}},
                    'MEPHISTO_is_submit': True,
                },
            )

            # # Check that the inputs and outputs are as expected

            state_0, state_1 = [
                agent.state.get_data() for agent in self.db.find_agents()
            ]
            actual_and_desired_states = [
                (state_0, DESIRED_STATE_AGENT_0),
                (state_1, DESIRED_STATE_AGENT_1),
            ]
            for actual_state, desired_state in actual_and_desired_states:
                assert actual_state['inputs'] == desired_state['inputs']
                assert len(actual_state['outputs']['messages']) == len(
                    desired_state['outputs']['messages']
                )
                for actual_message, desired_message in zip(
                    actual_state['outputs']['messages'],
                    desired_state['outputs']['messages'],
                ):
                    for key, desired_value in desired_message.items():
                        if key == 'timestamp':
                            pass  # The timestamp will obviously be different
                        elif key == 'data':
                            for key_inner, desired_value_inner in desired_value.items():
                                if key_inner == 'message_id':
                                    pass  # The message ID will be different
                                else:
                                    self.assertEqual(
                                        actual_message[key][key_inner],
                                        desired_value_inner,
                                    )
                        else:
                            self.assertEqual(actual_message[key], desired_value)
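The comparison loop above skips fields that legitimately differ across runs ('timestamp' at the top level, 'message_id' inside 'data') and asserts equality on everything else. The same logic restated as a standalone helper, for clarity; the helper name is illustrative and not part of the original test:

def assert_message_matches(test_case, actual_message, desired_message):
    # Compare one recorded message against its expected counterpart,
    # ignoring fields that are regenerated on every run.
    for key, desired_value in desired_message.items():
        if key == 'timestamp':
            continue  # Timestamps differ on every run
        if key == 'data':
            for key_inner, desired_inner in desired_value.items():
                if key_inner == 'message_id':
                    continue  # Message IDs are regenerated each run
                test_case.assertEqual(actual_message[key][key_inner], desired_inner)
        else:
            test_case.assertEqual(actual_message[key], desired_value)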