def run_all(
    model_type="gpt2",
    device="cuda",
    out_dir=".",
    random_weights=False,
    template_indices=None,
):
    print("Model:", model_type, flush=True)
    # Set up all the potential combinations.
    professions = get_profession_list()
    templates = get_template_list(template_indices)
    intervention_types = get_intervention_types()
    # Initialize Model and Tokenizer.
    model = Model(device=device,
                  gpt2_version=model_type,
                  random_weights=random_weights)
    tokenizer = (GPT2Tokenizer if model.is_gpt2 else
                 TransfoXLTokenizer if model.is_txl else
                 XLNetTokenizer if model.is_xlnet else
                 BertTokenizer if model.is_bert else
                 DistilBertTokenizer if model.is_distilbert else
                 RobertaTokenizer).from_pretrained(model_type)

    # Set up folder if it does not exist.
    dt_string = datetime.now().strftime("%Y%m%d")
    folder_name = dt_string + "_neuron_intervention"
    base_path = os.path.join(out_dir, "results", folder_name)
    if random_weights:
        base_path = os.path.join(base_path, "random")
    if not os.path.exists(base_path):
        os.makedirs(base_path)

    # Iterate over all possible templates.
    for temp in templates:
        print("Running template '{}' now...".format(temp), flush=True)
        # Fill in all professions into current template
        interventions = construct_interventions(temp, professions, tokenizer,
                                                device)
        # Consider all the intervention types
        for itype in intervention_types:
            print("\t Running with intervention: {}".format(itype), flush=True)
            # Run actual exp.
            intervention_results = model.neuron_intervention_experiment(
                interventions, itype, alpha=1.0)

            df = convert_results_to_pd(interventions, intervention_results)
            # Generate file name.
            temp_string = "_".join(temp.replace("{}", "X").split())
            model_type_string = model_type
            fname = "_".join([temp_string, itype, model_type_string])
            # Finally, save each exp separately.
            df.to_csv(os.path.join(base_path, fname + ".csv"))
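
# A minimal driver sketch for run_all; the CLI flags below are illustrative
# assumptions rather than the original script's interface.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_type", type=str, default="gpt2")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--out_dir", type=str, default=".")
    parser.add_argument("--random_weights", action="store_true")
    args = parser.parse_args()
    run_all(model_type=args.model_type,
            device=args.device,
            out_dir=args.out_dir,
            random_weights=args.random_weights)
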
def intervene_attention(gpt2_version,
                        do_filter,
                        stat,
                        device='cuda',
                        filter_quantile=0.25,
                        random_weights=False):
    model = Model(output_attentions=True,
                  gpt2_version=gpt2_version,
                  device=device,
                  random_weights=random_weights)
    tokenizer = GPT2Tokenizer.from_pretrained(gpt2_version)

    interventions, json_data = get_interventions_winogender(
        gpt2_version, do_filter, stat, model, tokenizer, device,
        filter_quantile)
    results = perform_interventions(interventions, model)
    json_data['mean_total_effect'] = DataFrame(results).total_effect.mean()
    json_data['mean_model_indirect_effect'] = DataFrame(
        results).indirect_effect_model.mean()
    json_data['mean_model_direct_effect'] = DataFrame(
        results).direct_effect_model.mean()
    filter_name = 'filtered' if do_filter else 'unfiltered'
    if random_weights:
        gpt2_version += '_random'
    fname = f"winogender_data/attention_intervention_{stat}_{gpt2_version}_{filter_name}.json"
    json_data['results'] = results
    with open(fname, 'w') as f:
        json.dump(json_data, f)
def intervene_attention(gpt2_version, do_filter, stat, device='cuda',
                        filter_quantile=0.25, random_weights=False,
                        masking_approach=1):
    model = Model(output_attentions=True, gpt2_version=gpt2_version,
                  device=device, random_weights=random_weights,
                  masking_approach=masking_approach)
    tokenizer = (GPT2Tokenizer if model.is_gpt2 else
                 TransfoXLTokenizer if model.is_txl else
                 XLNetTokenizer if model.is_xlnet else
                 BertTokenizer if model.is_bert else
                 DistilBertTokenizer if model.is_distilbert else
                 RobertaTokenizer).from_pretrained(gpt2_version)

    interventions, json_data = get_interventions_winogender(gpt2_version, do_filter, stat, model, tokenizer,
                                                            device, filter_quantile)
    results = perform_interventions(interventions, model)
    json_data['mean_total_effect'] = DataFrame(results).total_effect.mean()
    json_data['mean_model_indirect_effect'] = DataFrame(results).indirect_effect_model.mean()
    json_data['mean_model_direct_effect'] = DataFrame(results).direct_effect_model.mean()
    filter_name = 'filtered' if do_filter else 'unfiltered'
    if random_weights:
        gpt2_version += '_random'
    if model.is_gpt2 or model.is_txl or model.is_xlnet:
        fname = f"winogender_data/attention_intervention_{stat}_{gpt2_version}_{filter_name}.json"
    else:
        fname = f"winogender_data/attention_intervention_{stat}_{gpt2_version}_{filter_name}_{masking_approach}.json"
    json_data['results'] = results
    with open(fname, 'w') as f:
        json.dump(json_data, f)
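
# Hypothetical invocation sketch for intervene_attention; 'bls' is the
# statistic used elsewhere in these examples, while the model names and the
# remaining argument values are assumptions for illustration:
#
#     for version in ("gpt2", "gpt2-medium"):
#         intervene_attention(version, do_filter=True, stat="bls",
#                             device="cuda", filter_quantile=0.25)
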
    ap.add_argument('--layer', type=int, default=-1)
    ap.add_argument('--out_dir', type=str, default='results')

    args = ap.parse_args()

    algo = args.algo
    k = args.k
    layer = args.layer
    out_dir = args.out_dir
    model_type = args.model_type

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    DEVICE = 'cuda'
    tokenizer = GPT2Tokenizer.from_pretrained(model_type)
    model = Model(device=DEVICE, gpt2_version=model_type)

    templates = get_template_list()

    if args.algo == 'topk':
        marg_contrib_path = os.path.join(out_dir, "marg_contrib.pickle")
        if os.path.exists(marg_contrib_path):
            print('Using cached marginal contribution')
            with open(marg_contrib_path, "rb") as f:
                marg_contrib = pickle.load(f)
            layer_list = marg_contrib['layer']
            neuron_list = marg_contrib['neuron']
        else:
            print('Computing marginal contribution')
            layer_list, neuron_list = get_all_contrib(templates, tokenizer,
                                                      out_dir)
    algo = args.algo
    k = args.k
    data = args.data
    out_dir = args.out_dir

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    if args.data == 'winobias':
        data_ext = 'wb'
    else:
        data_ext = 'wg'

    tokenizer = GPT2Tokenizer.from_pretrained(model_type)
    model = Model(output_attentions=True,
                  device='cuda',
                  gpt2_version=model_type)

    if data == 'winobias':
        interventions, _ = get_interventions_winobias(model_type,
                                                      do_filter=True,
                                                      split='dev',
                                                      model=model,
                                                      tokenizer=tokenizer,
                                                      device='cuda')
    else:
        interventions, _ = get_interventions_winogender(model_type,
                                                        do_filter=True,
                                                        stat='bls',
                                                        model=model,
                                                        tokenizer=tokenizer,
                                                        device='cuda')
        candidate1_alt_prob, candidate2_alt_prob = model.get_probabilities_for_examples_multitoken(
            x_alt, intervention.candidates_tok)

    odds_base = candidate2_base_prob / candidate1_base_prob
    odds_alt = candidate2_alt_prob / candidate1_alt_prob
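    # The returned ratio compares the candidates' odds after the intervention
    # to their odds before it; a purely illustrative numeric sketch (not data
    # from any experiment):
    #   base: p(c1)=0.6, p(c2)=0.2  ->  odds_base = 0.2 / 0.6 ≈ 0.33
    #   alt:  p(c1)=0.3, p(c2)=0.3  ->  odds_alt  = 0.3 / 0.3 = 1.0
    #   returned ratio ≈ 3.0, i.e. the intervention tripled c2's relative odds.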
    return odds_alt / odds_base


def topk_indices(arr, k):
    """Return indices of top-k values"""
    return (-arr).argsort(axis=None)[:k]
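
# Quick sanity-check sketch for topk_indices (values are illustrative; assumes
# numpy is imported as np):
#   arr = np.array([0.1, 0.9, 0.3, 0.7])
#   topk_indices(arr, 2)  # -> array([1, 3]), the indices of 0.9 and 0.7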


if __name__ == "__main__":
    from transformers import GPT2Tokenizer
    from experiment import Intervention, Model
    from pandas import DataFrame
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = Model(output_attentions=True)

    # Test experiment
    interventions = [
        Intervention(tokenizer, "The doctor asked the nurse a question. {}",
                     ["He", "She"], ["asked", "answered"]),
        Intervention(tokenizer, "The doctor asked the nurse a question. {}",
                     ["He", "She"], ["requested", "responded"])
    ]

    results = perform_interventions(interventions, model)
    report_interventions_summary_by_layer(results)
def run_all(
    model_type="gpt2",
    device="cuda",
    out_dir=".",
    grammar_file=None,
    structure=None,
    random_weights=False,
    template_indices=None,
):
    print("Model:", model_type, flush=True)
    # Set up all the potential combinations.
    if grammar_file is not None:
        if structure is None:
            raise ValueError(
                "grammar file given but no structure specified")
        grammar = read_grammar(grammar_file)
        professions = get_profession_list(grammar=grammar, structure=structure)
        templates = get_template_list(structure=structure, grammar=grammar)
        intervention_types = get_intervention_types(bias_type="structural")
    else:
        professions = get_profession_list()
        templates = get_template_list(template_indices)
        intervention_types = get_intervention_types()

    # Initialize Model and Tokenizer.
    tokenizer = GPT2Tokenizer.from_pretrained(model_type)
    model = Model(device=device,
                  gpt2_version=model_type,
                  random_weights=random_weights)

    # Set up folder if it does not exist.
    dt_string = datetime.now().strftime("%Y%m%d")
    folder_name = dt_string + "_neuron_intervention"
    base_path = os.path.join(out_dir, "results", folder_name)
    if random_weights:
        base_path = os.path.join(base_path, "random")
    if not os.path.exists(base_path):
        os.makedirs(base_path)

    # Iterate over all possible templates.
    for number in ('sing', 'pl'):
        for temp in templates[number]:
            print("Running template '{}' now...".format(temp), flush=True)
            # Fill in all professions into current template
            if structure == "simple_agreement":
                other_number = 'sing' if number == 'pl' else 'pl'
                interventions = construct_interventions(
                    temp,
                    professions[number],
                    tokenizer,
                    device,
                    structure=structure,
                    number=number,
                    subs=professions[other_number])
            else:
                interventions = construct_interventions(temp,
                                                        professions[number],
                                                        tokenizer,
                                                        device,
                                                        structure=structure,
                                                        number=number)
            # Consider all the intervention types
            for itype in intervention_types:
                print("\t Running with intervention: {}".format(itype),
                      flush=True)
                # Run actual exp.
                intervention_results = model.neuron_intervention_experiment(
                    interventions, itype, alpha=1.0)

                df = convert_results_to_pd(interventions, intervention_results)
                # Generate file name.
                temp_string = "_".join(temp.replace("{}", "X").split())
                model_type_string = model_type
                fname = "_".join([temp_string, itype, model_type_string])
                # Finally, save each exp separately.
                df.to_csv(os.path.join(base_path, fname + ".csv"))
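
# Hypothetical call sketch for the grammar-driven variant; the grammar file
# name is an assumption for illustration, while "simple_agreement" is the
# structure label used above:
#   run_all(model_type="gpt2", device="cuda", out_dir=".",
#           grammar_file="agreement_grammar.txt", structure="simple_agreement")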