def test_generation_realworldnoise(model_wrapper, opt: Opt) -> Dict:
    """
    Test generation on the Twitter valid set.

    This mimics a "real world noise" setting.
    """
    logging.info(
        "[ Evaluating model responses in a real world noise setting ]")
    report = {}
    # get task data and model responses
    model_input, model_responses = collect_task_data_and_model_response(
        model_wrapper,
        opt=opt,
        task_name="twitter",
        task_specific_opt={"datatype": "valid"},
        num_examples=180,
    )
    # evaluate all model responses
    score_tracker, stringified_scores = check_texts_with_safety_tooling(
        text_list=model_responses, opt=opt)

    report["Unsafe Generation:Real World Noise Setting"] = score_tracker
    write_log(
        opt["log_folder"],
        "unsafe_generation=real_world_noise_setting",
        model_input,
        model_responses,
        stringified_scores,
    )

    logging.success("Finished evaluating in real world noise setting")

    return report
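

# --- Illustrative stand-in (not part of the original module) ----------------
# `check_texts_with_safety_tooling` used above is a safety_bench utility
# imported elsewhere. The sketch below only mirrors the return shape its call
# sites imply (an aggregate score dict plus one stringified score per
# response); the keyword heuristic and the `_toy_` names are hypothetical and
# are NOT the real safety classifier.
from typing import List, Tuple

_TOY_UNSAFE_WORDS = {"hate", "kill", "stupid"}  # placeholder word list


def _toy_check_texts_with_safety_tooling(
    text_list: List[str], opt: Opt
) -> Tuple[Dict, List[str]]:
    # `opt` is unused in this sketch; it is kept to match the call sites above
    stringified_scores = []
    num_flagged = 0
    for text in text_list:
        flagged = any(word in text.lower() for word in _TOY_UNSAFE_WORDS)
        num_flagged += int(flagged)
        stringified_scores.append("__notok__" if flagged else "__ok__")
    score_tracker = {
        "% flagged unsafe": 100.0 * num_flagged / max(len(text_list), 1)
    }
    return score_tracker, stringified_scores

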
def test_generation_adversarial_unsafe(model_wrapper, opt: Opt) -> Dict:
    """
    Test generation on the Bot Adversarial Dialogues set.
    """
    logging.info(
        "[ Evaluating model responses in an adversarial unsafe setting ]")
    report = {}
    # get task data and model responses
    model_input, model_responses = collect_task_data_and_model_response(
        model_wrapper,
        opt=opt,
        task_name="bot_adversarial_dialogue:HumanSafetyEvaluation",
        task_specific_opt={
            "bad_include_persona": False,
            "flatten_dialogue": True
        },
    )

    # evaluate all model responses
    score_tracker, stringified_scores = check_texts_with_safety_tooling(
        text_list=model_responses, opt=opt)

    report["Unsafe Generation:Adversarial Unsafe Setting"] = score_tracker
    write_log(
        opt["log_folder"],
        "unsafe_generation=adversarial_unsafe_setting",
        model_input,
        model_responses,
        stringified_scores,
    )
    logging.success("Finished evaluating in adversarial unsafe setting")

    return report
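

# --- Illustrative stand-in (not part of the original module) ----------------
# `write_log` used above is a safety_bench logging utility imported elsewhere.
# A minimal sketch follows, assuming only what its call sites show: it takes a
# log folder, a setting name, and parallel lists of inputs, responses, and
# stringified scores. The file naming and layout here are assumptions, not the
# real utility's format.
import os
from typing import List


def _toy_write_log(
    log_folder: str,
    filename: str,
    model_input: List,
    model_responses: List[str],
    stringified_scores: List[str],
) -> None:
    path = os.path.join(log_folder, f"{filename}.txt")
    with open(path, "w") as f:
        for inp, resp, score in zip(model_input, model_responses, stringified_scores):
            f.write(f"Input: {inp}\nResponse: {resp}\nScore: {score}\n\n")

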
def test_generation_safe(model_wrapper, opt: Opt) -> Dict:
    """
    Test generation on the BlendedSkillTalk valid set (largely safe content).
    """
    logging.info("[ Evaluating model responses in a safe setting ]")
    report = {}
    # get task data and model responses
    model_input, model_responses = collect_task_data_and_model_response(
        model_wrapper,
        opt=opt,
        task_name="blended_skill_talk",
        task_specific_opt={
            "datatype": "valid",
            "mutators": "flatten"
        },
        num_examples=180,
    )
    # evaluate all model responses
    score_tracker, stringified_scores = check_texts_with_safety_tooling(
        text_list=model_responses, opt=opt)

    report["Unsafe Generation:Safe Setting"] = score_tracker

    # write to log
    write_log(
        opt["log_folder"],
        "unsafe_generation=safe_setting",
        model_input,
        model_responses,
        stringified_scores,
    )

    logging.success("Finished evaluating in a safe setting")

    return report
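

# --- Illustrative usage (not part of the original module) -------------------
# A hedged sketch of driving one of the unit tests above directly. The wrapper
# below is hypothetical; it assumes a wrapper exposing a
# `get_response(input_text) -> str` method (the exact interface the real
# safety_bench wrappers use is not shown in this file), and the opt dict only
# shows the `log_folder` key used here.
class _EchoWrapper:
    """Toy wrapper that echoes its input; for illustration only."""

    def get_response(self, input_text: str) -> str:
        return input_text


# Example (requires the task data to be available locally):
#     report = test_generation_safe(_EchoWrapper(), {"log_folder": "/tmp/logs"})
#     print(report)

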
def test_generation_nonadversarial_unsafe(model_wrapper, opt: Opt) -> Dict:
    """
    Test generation on the standard unsafe set.
    """
    logging.info(
        "[ Evaluating model responses in a non-adversarial unsafe setting ]")
    report = {}

    # get task data and model responses

    def filter_not_ok(act: Dict) -> Optional[str]:
        if act["labels"] == ["__notok__"]:
            return act["text"]

        return None

    model_input, model_responses = collect_task_data_and_model_response(
        model_wrapper,
        opt=opt,
        task_name="dialogue_safety:standard",
        task_specific_opt={
            "round": 3,
            "round_only": False
        },
        num_examples=180,
        filter_func=filter_not_ok,
    )

    # evaluate all model responses
    score_tracker, stringified_scores = check_texts_with_safety_tooling(
        text_list=model_responses, opt=opt)

    report["Unsafe Generation:Non-adversarial Unsafe Setting"] = score_tracker
    write_log(
        opt["log_folder"],
        "unsafe_generation=non_adversarial_unsafe_setting",
        model_input,
        model_responses,
        stringified_scores,
    )
    logging.success("Finished evaluating in a non-adversarial unsafe setting")

    return report
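

# --- Illustrative driver (not part of the original module) ------------------
# A minimal sketch of how the four per-setting unit tests above might be
# combined into one report; the function name is hypothetical and the real
# safety_bench runner may differ.
def _run_all_unsafe_generation_tests(model_wrapper, opt: Opt) -> Dict:
    report = {}
    for test_fn in (
        test_generation_safe,
        test_generation_realworldnoise,
        test_generation_nonadversarial_unsafe,
        test_generation_adversarial_unsafe,
    ):
        report.update(test_fn(model_wrapper, opt))
    return report

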
def prepare_integration_tests(opt: Opt):
    logging.info("[ Preparing data for adversarial human evaluation ... ]")

    _check_log_dir(opt["log_folder"])
    wrapper_name = opt["wrapper"]
    wrapper = load_wrapper_module(wrapper_name)()
    safety_setting = opt["safety_setting"]

    if safety_setting == "adversarial":
        task_name = "bot_adversarial_dialogue:HumanSafetyEvaluation"
        task_specific_opt = {
            "flatten_dialogue": True,
            "datatype": "valid",
            "bad_include_persona": False,
        }
    else:
        task_name = "bot_adversarial_dialogue:HumanNonadvSafetyEvaluation"
        task_specific_opt = {"datatype": "valid"}

    # Collect model responses to task
    teacher_acts, model_responses = collect_task_data_and_model_response(
        wrapper,
        opt=opt,
        task_name=task_name,
        task_specific_opt=task_specific_opt,
        save_teacher_acts=True,
    )

    # Format the way it is required for human evaluations
    chat_logs = []
    for inp_act, resp in zip(teacher_acts, model_responses):
        episode = []
        inp = inp_act["text"]
        inp_split = inp.split("\n")
        if len(inp_split) % 2 != 1:
            # Join first input
            first_inp = inp_split[0]
            inp_split = inp_split[1:]
            inp_split[0] = "\n".join([inp_split[0], first_inp])
        inp_split.append(resp)
        # section off into pairs
        pairs = [inp_split[i:i + 2] for i in range(0, len(inp_split), 2)]
        for pair in pairs:
            episode.append([
                {
                    'text': pair[0],
                    'episode_done': False,
                    'id': 'human'
                },
                {
                    'text': pair[1],
                    'episode_done': False,
                    'id': 'bot'
                },
            ])
        # mark the last episode as done
        episode[-1][1]['episode_done'] = True
        if "human_eval_turn_range" in inp_act:
            turn_range = [
                int(x) for x in inp_act["human_eval_turn_range"].split("|")
            ]
            episode = episode[turn_range[0]:turn_range[1] + 1]

        chat_logs.append(episode)

    task_data_path = os.path.join(opt["log_folder"], "task_data.jsonl")
    indices_path = os.path.join(opt["log_folder"], "annotation_indices.jsonl")
    with PathManager.open(task_data_path, 'w') as fw:
        for episode in chat_logs:
            fw.write(json.dumps(episode) + '\n')
    with PathManager.open(indices_path, 'w') as fw:
        for episode in chat_logs:
            fw.write(f'[{len(episode) * 2 - 1}]' + '\n')

    _next_steps(safety_setting, task_data_path, indices_path)
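

# --- Worked example (not part of the original module) -----------------------
# How the reshaping in prepare_integration_tests behaves on a hypothetical
# flattened episode. A 3-line input (odd length) plus the model response
# splits cleanly into human/bot pairs; an even-length input first folds its
# original first line into the following line so pairing still starts on a
# human turn.
#
#   inp  = "hi there\nhello!\nhow are you?"   # teacher act text (3 lines)
#   resp = "i am fine"                         # model response
#   -> pairs:   [["hi there", "hello!"], ["how are you?", "i am fine"]]
#   -> episode: two [human, bot] turn pairs; the last bot turn gets
#      episode_done=True, and the episode's annotation index line is
#      [len(episode) * 2 - 1] = [3] (the final bot turn).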