def run_all(
    model_type="gpt2",
    device="cuda",
    out_dir=".",
    random_weights=False,
    template_indices=None,
):
    """Run the neuron-intervention experiment over every (template,
    profession, intervention-type) combination and save one CSV per
    template/intervention pair.

    Parameters
    ----------
    model_type : str
        Pretrained model identifier (e.g. "gpt2"); also selects the
        tokenizer weights and appears in output file names.
    device : str
        Torch device string the model is placed on.
    out_dir : str
        Root output directory; results land under
        ``<out_dir>/results/<YYYYMMDD>_neuron_intervention``.
    random_weights : bool
        If True, the model uses random initialization and results are
        written to a ``random`` subfolder.
    template_indices : sequence | None
        Optional subset of templates to run; None means all
        (forwarded to ``get_template_list``).
    """
    print("Model:", model_type, flush=True)
    # Set up all the potential combinations.
    professions = get_profession_list()
    templates = get_template_list(template_indices)
    intervention_types = get_intervention_types()

    # Initialize Model and Tokenizer.
    model = Model(device=device,
                  gpt2_version=model_type,
                  random_weights=random_weights)
    # Pick the tokenizer class matching the model family.
    # (Rewritten from a 5-way nested conditional expression for clarity;
    # RobertaTokenizer is the fallback when no family flag matches.)
    if model.is_gpt2:
        tokenizer_class = GPT2Tokenizer
    elif model.is_txl:
        tokenizer_class = TransfoXLTokenizer
    elif model.is_xlnet:
        tokenizer_class = XLNetTokenizer
    elif model.is_bert:
        tokenizer_class = BertTokenizer
    elif model.is_distilbert:
        tokenizer_class = DistilBertTokenizer
    else:
        tokenizer_class = RobertaTokenizer
    tokenizer = tokenizer_class.from_pretrained(model_type)

    # Set up output folder; dated so repeated runs group by day.
    dt_string = datetime.now().strftime("%Y%m%d")
    folder_name = dt_string + "_neuron_intervention"
    base_path = os.path.join(out_dir, "results", folder_name)
    if random_weights:
        base_path = os.path.join(base_path, "random")
    # exist_ok avoids the check-then-create race of the exists()/makedirs pair.
    os.makedirs(base_path, exist_ok=True)

    # Iterate over all possible templates.
    for temp in templates:
        print("Running template '{}' now...".format(temp), flush=True)
        # Fill in all professions into current template.
        interventions = construct_interventions(temp, professions, tokenizer,
                                                device)
        # Consider all the intervention types.
        for itype in intervention_types:
            print("\t Running with intervention: {}".format(itype), flush=True)
            # Run actual exp.
            intervention_results = model.neuron_intervention_experiment(
                interventions, itype, alpha=1.0)

            df = convert_results_to_pd(interventions, intervention_results)
            # Generate file name: template with "{}" slots marked as X,
            # whitespace collapsed to underscores.
            temp_string = "_".join(temp.replace("{}", "X").split())
            fname = "_".join([temp_string, itype, model_type])
            # Finally, save each exp separately.
            df.to_csv(os.path.join(base_path, fname + ".csv"))
def run_all(
    model_type="gpt2",
    device="cuda",
    out_dir=".",
    grammar_file=None,
    structure=None,
    random_weights=False,
    template_indices=None,
):
    """Run neuron-intervention experiments over singular/plural template
    sets and save one CSV per template/intervention pair.

    NOTE(review): this redefines ``run_all`` and silently shadows the
    earlier definition of the same name in this module — confirm which
    variant is intended to be the public entry point.

    Parameters
    ----------
    model_type : str
        GPT-2 pretrained identifier; also used in output file names.
    device : str
        Torch device string the model is placed on.
    out_dir : str
        Root output directory; results land under
        ``<out_dir>/results/<YYYYMMDD>_neuron_intervention``.
    grammar_file : str | None
        If given, professions/templates are generated from this grammar
        and ``structure`` must also be supplied.
    structure : str | None
        Structural-bias condition (e.g. "simple_agreement"); required
        when ``grammar_file`` is set, forwarded to the helpers otherwise.
    random_weights : bool
        If True, the model uses random initialization and results are
        written to a ``random`` subfolder.
    template_indices : sequence | None
        Optional template subset; only used on the non-grammar path.

    Raises
    ------
    ValueError
        If ``grammar_file`` is given without ``structure``.
    """
    print("Model:", model_type, flush=True)
    # Set up all the potential combinations.
    if grammar_file is not None:
        if structure is None:
            # ValueError (narrower than the previous bare Exception) is
            # still caught by any caller handling Exception.
            raise ValueError(
                "Error: grammar file given but no structure specified")
        grammar = read_grammar(grammar_file)
        professions = get_profession_list(grammar=grammar, structure=structure)
        templates = get_template_list(structure=structure, grammar=grammar)
        intervention_types = get_intervention_types(bias_type="structural")
    else:
        professions = get_profession_list()
        templates = get_template_list(template_indices)
        intervention_types = get_intervention_types()

    # Initialize Model and Tokenizer.
    tokenizer = GPT2Tokenizer.from_pretrained(model_type)
    model = Model(device=device,
                  gpt2_version=model_type,
                  random_weights=random_weights)

    # Set up output folder; dated so repeated runs group by day.
    dt_string = datetime.now().strftime("%Y%m%d")
    folder_name = dt_string + "_neuron_intervention"
    base_path = os.path.join(out_dir, "results", folder_name)
    if random_weights:
        base_path = os.path.join(base_path, "random")
    # exist_ok avoids the check-then-create race of the exists()/makedirs pair.
    os.makedirs(base_path, exist_ok=True)

    # Iterate over all templates, split by grammatical number.
    # Assumes templates/professions are mappings keyed by 'sing'/'pl' —
    # TODO confirm get_template_list/get_profession_list return that shape
    # on BOTH branches above (the template_indices branch in particular).
    for number in ('sing', 'pl'):
        for temp in templates[number]:
            print("Running template '{}' now...".format(temp), flush=True)
            # Fill in all professions into current template.
            if structure == "simple_agreement":
                # Opposite-number professions act as substitution candidates.
                other_number = 'sing' if number == 'pl' else 'pl'
                interventions = construct_interventions(
                    temp,
                    professions[number],
                    tokenizer,
                    device,
                    structure=structure,
                    number=number,
                    subs=professions[other_number])
            else:
                interventions = construct_interventions(temp,
                                                        professions[number],
                                                        tokenizer,
                                                        device,
                                                        structure=structure,
                                                        number=number)
            # Consider all the intervention types.
            for itype in intervention_types:
                print("\t Running with intervention: {}".format(itype),
                      flush=True)
                # Run actual exp.
                intervention_results = model.neuron_intervention_experiment(
                    interventions, itype, alpha=1.0)

                df = convert_results_to_pd(interventions, intervention_results)
                # Generate file name: template with "{}" slots marked as X,
                # whitespace collapsed to underscores.
                temp_string = "_".join(temp.replace("{}", "X").split())
                fname = "_".join([temp_string, itype, model_type])
                # Finally, save each exp separately.
                df.to_csv(os.path.join(base_path, fname + ".csv"))