Esempio n. 1
0
def per_doc_score():
    # Visualize per-document "priority_score" vectors from a pickled
    # estimator prediction, writing token cells (20 per row) to HTML.
    filename = "tlm_view.pickle"
    html_writer = HtmlVisualizer("per_doc_score.html", dark_mode=False)

    data = EstimatorPredictionViewerGosford(filename)
    amp = 20  # NOTE(review): unused in the visible code
    small_threshold = 40  # skip documents with fewer than this many cells
    for inst_i, entry in enumerate(data):
        if inst_i > 1000:  # cap the number of rendered instances
            break
        scores = entry.get_vector("priority_score")

        tokens = entry.get_mask_resolved_input_mask_with_input()
        cells = data.cells_from_tokens(tokens)
        if len(cells) < small_threshold:
            continue
        avg_score = average(scores)
        # Keep only documents whose average score lies OUTSIDE (-0.30, -0.11).
        if -0.11 > avg_score > -0.30:
            continue
        print(average(scores))
        html_writer.write_headline(avg_score)
        rows = []  # NOTE(review): unused; rows are written one at a time below
        row = []
        for idx, cell in enumerate(cells):
            row.append(cell)
            if len(row) == 20:
                html_writer.write_table([row])
                row = []
        # NOTE(review): a trailing partial row (< 20 cells) is never written;
        # confirm this truncation is intended.
Esempio n. 2
0
def dev():
    """Render NLI-with-dictionary batches as HTML for manual inspection.

    Pulls random batches from a cached data feeder, decodes the main input
    and the dictionary-definition input with a BERT tokenizer, and writes
    both token sequences to nli_w_dict.html.
    """
    train_data_feeder = load_cache("train_data_feeder")
    tokenizer = tokenizer_wo_tf.FullTokenizer(
        os.path.join(data_path, "bert_voca.txt"))

    html_writer = HtmlVisualizer("nli_w_dict.html", dark_mode=False)

    for _ in range(100):
        batch = train_data_feeder.get_random_batch(1)

        input_ids, input_mask, segment_ids, d_input_ids, d_input_mask, d_location_ids, y = batch

        tokens = tokenizer.convert_ids_to_tokens(input_ids[0])

        for i in range(len(tokens)):
            # Bug fix: the original used "i is not 0", an identity (not value)
            # comparison on an int, which is fragile and a SyntaxWarning.
            if i != 0 and i in d_location_ids:
                # Bold the tokens at dictionary-lookup locations.
                tokens[i] = "<b>{}</b>".format(tokens[i])
            if tokens[i] == "[unused3]":
                tokens[i] = "[SEP]\n"

        s = tokenizer_wo_tf.pretty_tokens(tokens)
        html_writer.write_headline("Input")
        html_writer.write_paragraph(s)

        d_tokens = tokenizer.convert_ids_to_tokens(d_input_ids[0])
        for i in range(len(d_tokens)):
            # Bug fix: the original inspected/mutated `tokens[i]` here even
            # though this loop iterates over `d_tokens` — so definition-side
            # line breaks were never inserted.
            if d_tokens[i] == "[unused5]":
                d_tokens[i] = "<br>\n"

        s = tokenizer_wo_tf.pretty_tokens(d_tokens)
        html_writer.write_headline("Dict def")
        html_writer.write_paragraph(s)

    html_writer.close()
Esempio n. 3
0
def visualize_prediction_data(data_id):
    """Join per-entry masked-LM losses with their tf-record features and
    write an HTML table comparing the no-dictionary baseline loss against
    the loss obtained with each dictionary definition.
    """
    tokenizer = get_tokenizer()
    # One line per entry: how many instances (baseline + definitions) it has.
    num_samples_list = open(
        os.path.join(working_path, "entry_prediction_n", data_id),
        "r").readlines()
    p = os.path.join(working_path, "entry_loss",
                     "entry{}.pickle".format(data_id))
    loss_outputs_list = pickle.load(open(p, "rb"))
    print("Loaded input data")
    # Flatten the per-batch loss arrays into one flat, ordered list.
    loss_outputs = []
    for e in loss_outputs_list:
        loss_outputs.extend(e["masked_lm_example_loss"])
    print("Total of {} loss outputs".format(len(loss_outputs)))
    instance_idx = 0
    feature_itr = load_record_v2(
        os.path.join(working_path, "entry_prediction_tf.done", data_id))
    n = len(num_samples_list)
    n = 100  # NOTE(review): debug override — caps output at 100 entries.
    html = HtmlVisualizer("entry_prediction.html")
    for i in range(n):
        n_sample = int(num_samples_list[i])
        assert n_sample > 0
        first_inst = feature_itr.__next__()
        feature = Feature2Text(first_inst, tokenizer)

        html.write_headline("Input:")
        html.write_paragraph(feature.get_input_as_text(True, True))
        html.write_headline("Word:" + feature.get_selected_word_text())

        if instance_idx + n_sample >= len(loss_outputs):
            break

        # NOTE(review): this skips WITHOUT advancing instance_idx, which
        # likely desynchronizes loss_outputs from the feature iterator for
        # every later entry — confirm whether `instance_idx += 1` is missing.
        if n_sample == 1:
            continue

        rows = []
        # The first instance of each entry is the no-dictionary baseline.
        no_dict_loss = loss_outputs[instance_idx]
        row = [Cell(no_dict_loss, 0), Cell("")]
        rows.append(row)
        instance_idx += 1
        for j in range(1, n_sample):
            feature = Feature2Text(feature_itr.__next__(), tokenizer)
            def_cell = Cell(feature.get_def_as_text())
            loss = loss_outputs[instance_idx]
            # Highlight definitions that cut the loss by more than 10%.
            hl_score = 100 if loss < no_dict_loss * 0.9 else 0
            row = [Cell(loss, hl_score), def_cell]
            rows.append(row)
            instance_idx += 1

        html.write_table(rows)
Esempio n. 4
0
def show(all_info):
    """Write each claim/perspective group in all_info to cppnc.html.

    Each value is a list of (score, rel_score, info) triples; the first
    triple supplies the claim and perspective headlines.
    """
    html = HtmlVisualizer("cppnc.html")
    n_written = 0
    for cpid, entries in all_info.items():
        score, rel_score, info = entries[0]
        html.write_headline("Claim {}: {}".format(info['cid'], info['c_text']))
        html.write_headline("Perspective: " + info['p_text'])

        # One headline pair plus the joined passage text per entry.
        for score, rel_score, info in entries:
            html.write_headline("score: {}".format(score))
            html.write_headline("rel_score: {}".format(rel_score))
            html.write_paragraph(" ".join(info['passage']))

        n_written += 1
        if n_written > 10000:  # safety cap on output size
            break
Esempio n. 5
0
def main():
    """Visualize per-token CPPNC scores for the first few training claims.

    For each claim, aggregates passage-level scores per document, then
    renders each document's tokens colored by per-token score (blue for
    positive, red for negative), followed by one extra row per score rank.
    """
    print("Loading scores...")
    cid_grouped: Dict[str, Dict[str, List[Dict]]] = load_cppnc_score_wrap()
    baseline_cid_grouped = load_baseline("train_baseline")
    gold = get_claim_perspective_id_dict()
    tokenizer = get_tokenizer()
    claim_d = load_train_claim_d()

    print("Start analyzing")
    html = HtmlVisualizer("cppnc_value_per_token_score.html")
    claim_cnt = 0
    for cid, pid_entries_d in cid_grouped.items():
        pid_entries_d: Dict[str, List[Dict]] = pid_entries_d
        pid_entries: List[Tuple[str, List[Dict]]] = list(pid_entries_d.items())
        baseline_pid_entries = baseline_cid_grouped[int(cid)]
        baseline_score_d = fetch_score_per_pid(baseline_pid_entries)
        gold_pids = gold[int(cid)]

        ret = collect_score_per_doc(baseline_score_d, get_score_from_entry, gold_pids,
                                                                  pid_entries)
        passage_tokens_d = collect_passage_tokens(pid_entries)
        # ret[0]: doc_idx -> (doc_id, passage_idx); ret[1]: per-doc score lists.
        doc_info_d: Dict[int, Tuple[str, int]] = ret[0]
        doc_value_arr: List[List[float]] = ret[1]

        # Group (doc_id, passage_idx, avg_score) triples by document.
        kdp_result_grouped = defaultdict(list)
        for doc_idx, doc_values in enumerate(doc_value_arr):
            doc_id, passage_idx = doc_info_d[doc_idx]
            avg_score = average(doc_values)
            kdp_result = doc_id, passage_idx, avg_score
            kdp_result_grouped[doc_id].append(kdp_result)

        s = "{} : {}".format(cid, claim_d[int(cid)])
        html.write_headline(s)
        claim_cnt += 1
        # NOTE(review): the 11th claim still writes its headline above before
        # this break fires — confirm that is intended.
        if claim_cnt > 10:
            break

        # NOTE(review): r[2] indexes doc_value_arr entries, which are the
        # per-doc score LISTS — so "scores" here is each doc's third score,
        # not an average. Verify this is what should be printed.
        scores: List[float] = list([r[2] for r in doc_value_arr])

        foreach(html.write_paragraph, lmap(str, scores))

        for doc_id, kdp_result_list in kdp_result_grouped.items():
            html.write_headline(doc_id)
            tokens, per_token_score = combine_collect_score(tokenizer, doc_id, passage_tokens_d, kdp_result_list)
            str_tokens = tokenizer.convert_ids_to_tokens(tokens)
            row = cells_from_tokens(str_tokens)
            # First row: tokens colored by their first (rank-0) score.
            for idx in range(len(str_tokens)):
                score = per_token_score[idx][0]
                # 0.01 maps to the max highlight of 100.
                norm_score = min(abs(score) * 10000, 100)
                color = "B" if score > 0 else "R"
                row[idx].highlight_score = norm_score
                row[idx].target_color = color

            rows = [row]
            nth = 0
            any_score_found = True
            # Emit one extra row per score rank until no token has an
            # nth score left.
            while any_score_found:
                any_score_found = False
                score_list = []
                for idx in range(len(str_tokens)):
                    if nth < len(per_token_score[idx]):
                        score = per_token_score[idx][nth]
                        any_score_found = True
                    else:
                        score = "-"  # placeholder for tokens without an nth score
                    score_list.append(score)

                def get_cell(score):
                    if score == "-":
                        return Cell("-")
                    else:
                        # 0.01 -> 100
                        norm_score = min(abs(score) * 10000, 100)
                        color = "B" if score > 0 else "R"
                        return Cell("", highlight_score=norm_score, target_color=color)

                nth += 1
                if any_score_found:
                    row = lmap(get_cell, score_list)
                    rows.append(row)
            html.multirow_print_from_cells_list(rows)
Esempio n. 6
0
def analyze_gradient(data, tokenizer):
    """Visualize per-token gradient magnitudes over dictionary definitions.

    For each instance and each masked-LM prediction, sums |gradient| over
    the hidden dimension and renders the definition tokens as colored HTML
    cells in dict_grad.html, one row per masked position (sorted by
    position).
    """
    gradients = data['gradients']
    d_input_ids = data['d_input_ids']
    mask_input_ids = data['masked_input_ids']
    masked_lm_positions = data["masked_lm_positions"]

    n_inst, seq_len = mask_input_ids.shape
    n_inst2, def_len = d_input_ids.shape

    assert n_inst == n_inst2

    # Hard-coded model dims override the shapes read above — presumably
    # matching the model config; TODO confirm.
    def_len = 256
    hidden_dim = 768
    reshaped_grad = reshape_gradienet(gradients, n_inst, def_len, hidden_dim)
    print(reshaped_grad.shape)

    n_pred = reshaped_grad.shape[1]

    # L1 gradient magnitude per token: sum |grad| over the hidden dim.
    grad_per_token = np.sum(np.abs(reshaped_grad), axis=3)

    html_writer = HtmlVisualizer("dict_grad.html", dark_mode=False)

    for inst_idx in range(n_inst):
        tokens = tokenizer.convert_ids_to_tokens(mask_input_ids[inst_idx])
        #ans_tokens = tokenizer.convert_ids_to_tokens(input_ids[inst_idx])
        for i in range(len(tokens)):
            if tokens[i] == "[MASK]":
                tokens[i] = "[MASK_{}]".format(i)
            if tokens[i] == "[SEP]":
                tokens[i] = "[SEP]<br>"
        def_tokens = tokenizer.convert_ids_to_tokens(d_input_ids[inst_idx])
        s = tokenizer_wo_tf.pretty_tokens(tokens)

        lines = []

        grad_total_max = 0
        for pred_idx in range(n_pred):
            row = []
            total = sum(grad_per_token[inst_idx, pred_idx])
            mask_pos = masked_lm_positions[inst_idx, pred_idx]

            if total > grad_total_max:
                grad_total_max = total

            row.append(Cell(mask_pos))
            row.append(Cell(int(total)))

            for def_idx in range(def_len):
                term = def_tokens[def_idx]
                # Bug fix: continuation-to-the-right must look at the NEXT
                # token's "##" prefix, not the current one (the original
                # made cont_right identical to cont_left; cf. the same
                # pattern in pred_loss_view).
                cont_right = (def_idx + 1 < def_len
                              and def_tokens[def_idx + 1][:2] == "##")
                cont_left = term[:2] == "##"

                if term == "[PAD]":
                    break
                if term == "[unused5]":
                    term = "[\\n]"

                # Scale down for display; 2*hidden_dim is an empirical norm.
                score = grad_per_token[inst_idx, pred_idx,
                                       def_idx] / (hidden_dim * 2)
                # (Removed unused locals: max_val, bg_color via get_color,
                # space_left/space_right — none were referenced.)

                row.append(Cell(term, score, not cont_left, not cont_right))
                print("{}({})".format(
                    term, grad_per_token[inst_idx, pred_idx, def_idx]),
                      end=" ")

            lines.append((mask_pos, row))
            print("")
        # Order rows by masked position in the input.
        lines.sort(key=lambda x: x[0])

        s = s.replace("[unused4]", "<b>DictTerm</b>")
        html_writer.write_paragraph(s)

        if grad_total_max > 5000000:
            html_writer.write_headline("HIGH Gradient")

        rows = right(lines)
        html_writer.write_table(rows)

        print("----------")
    html_writer.close()
Esempio n. 7
0
def doc_lm_scoring():
    """Score retrieved passages for each training claim with a claim
    language model (log-odds vs. background LM) and render the top/bottom
    passages token-by-token in doc_lm_doc_level.html.
    """
    gold = get_claim_perspective_id_dict()  # NOTE(review): unused below

    d_ids = list(load_train_claim_ids())
    claims: List[Dict] = get_claims_from_ids(d_ids)
    claims = claims  # NOTE(review): no-op; elsewhere this is a slice like claims[:10]
    top_n = 10
    q_res_path = FilePath(
        "/mnt/nfs/work3/youngwookim/data/perspective/train_claim/q_res_100")
    ranked_list: Dict[
        str, List[SimpleRankedListEntry]] = load_galago_ranked_list(q_res_path)
    preload_docs(ranked_list, claims, top_n)
    claim_lms = build_gold_lms(claims)
    claim_lms_d = {lm.cid: lm for lm in claim_lms}
    # Background LM: average of all per-claim LMs.
    bg_lm = average_counters(lmap(lambda x: x.LM, claim_lms))
    log_bg_lm = get_lm_log(bg_lm)

    stopwords = load_stopwords_for_query()  # NOTE(review): unused below
    alpha = 0.5  # smoothing weight between claim LM and background LM

    html_visualizer = HtmlVisualizer("doc_lm_doc_level.html")

    tokenizer = PCTokenizer()
    random_passages = []
    num_pos_sum = 0
    num_pos_exists = 0
    for c in claims:
        q_res: List[SimpleRankedListEntry] = ranked_list[str(c['cId'])]
        html_visualizer.write_headline("{} : {}".format(c['cId'], c['text']))
        # for cluster in clusters:
        #     html_visualizer.write_paragraph("---")
        #     p_text_list: List[str] = lmap(perspective_getter, cluster)
        #     for text in p_text_list:
        #         html_visualizer.write_paragraph(text)
        #     html_visualizer.write_paragraph("---")
        claim_lm = claim_lms_d[c['cId']]
        topic_lm_prob = smooth(claim_lm.LM, bg_lm, alpha)
        log_topic_lm = get_lm_log(smooth(claim_lm.LM, bg_lm, alpha))
        # Per-term log-odds of topic LM vs. background LM.
        log_odd: Counter = subtract(log_topic_lm, log_bg_lm)

        claim_text = c['text']
        claim_tokens = tokenizer.tokenize_stem(claim_text)

        # Threshold = average log-odds of the claim's own tokens.
        scores = []
        for t in claim_tokens:
            if t in log_odd:
                scores.append(log_odd[t])
        threshold = average(scores)

        s = "\t".join(left(log_odd.most_common(30)))
        html_visualizer.write_paragraph("Log odd top: " + s)
        not_found = set()

        def get_log_odd(x):
            # Stem first; log_odd is a Counter, so misses return 0.
            x = tokenizer.stemmer.stem(x)
            if x not in log_odd:
                not_found.add(x)
            return log_odd[x]

        def get_probs(x):
            # NOTE(review): defined but never called in this function.
            x = tokenizer.stemmer.stem(x)
            if x not in topic_lm_prob:
                not_found.add(x)
            return topic_lm_prob[x]

        def get_passage_score(p):
            # Mean per-token log-odds; empty passages score 0.
            return sum([log_odd[tokenizer.stemmer.stem(t)]
                        for t in p]) / len(p) if len(p) > 0 else 0

        passages = iterate_passages(q_res, top_n, get_passage_score)

        passages.sort(key=lambda x: x[1], reverse=True)
        html_visualizer.write_paragraph("Threshold {}".format(threshold))

        top5_scores = right(passages[:5])
        bot5_scores = right(passages[-5:])

        # Random baseline: rescore 5 passages collected from earlier claims
        # under THIS claim's LM (first few claims have no random baseline).
        if len(random_passages) > 5:
            random_sel_pssages = random.choices(random_passages, k=5)
        else:
            random_sel_pssages = []
        random5_scores = lmap(get_passage_score, random_sel_pssages)

        def score_line(scores):
            return " ".join(lmap(two_digit_float, scores))

        html_visualizer.write_paragraph("top 5: " + score_line(top5_scores))
        html_visualizer.write_paragraph("bot 5: " + score_line(bot5_scores))
        html_visualizer.write_paragraph("random 5: " +
                                        score_line(random5_scores))

        num_pos = len(lfilter(lambda x: x[1] > 0, passages))
        num_pos_sum += num_pos
        if num_pos > 0:
            num_pos_exists += 1

        def print_doc(doc, html_visualizer, score):
            # One colored cell per token, highlighted by its log-odds.
            cells = lmap(lambda x: get_cell_from_token(x, get_log_odd(x)), doc)
            html_visualizer.write_headline("score={}".format(score))
            html_visualizer.multirow_print(cells, width=20)

        random_passages.extend(left(passages))
        if threshold < 0:
            continue
        # Print positively-scored passages (list is sorted descending).
        for doc, score in passages:
            if score < 0:
                break
            print_doc(doc, html_visualizer, score)

        html_visualizer.write_headline("Bottom 5")
        for doc, score in passages[-5:]:
            print_doc(doc, html_visualizer, score)

    print("{} claims. {} docs on {} claims".format(len(claims), num_pos_sum,
                                                   num_pos_exists))
Esempio n. 8
0
def join_docs_and_lm():
    """For the first 10 training claims, render the gold perspective texts
    and the top-ranked documents with per-token log-odds highlighting in
    doc_lm_joined.html.
    """
    gold = get_claim_perspective_id_dict()

    d_ids = list(load_train_claim_ids())
    claims: List[Dict] = get_claims_from_ids(d_ids)
    claims = claims[:10]  # only the first 10 claims
    top_n = 10
    q_res_path = FilePath(
        "/mnt/nfs/work3/youngwookim/data/perspective/train_claim/q_res_100")
    ranked_list: Dict[
        str, List[SimpleRankedListEntry]] = load_galago_ranked_list(q_res_path)
    preload_docs(ranked_list, claims, top_n)
    claim_lms = build_gold_lms(claims)
    claim_lms_d = {lm.cid: lm for lm in claim_lms}
    # Background LM: average of all per-claim LMs.
    bg_lm = average_counters(lmap(lambda x: x.LM, claim_lms))
    log_bg_lm = get_lm_log(bg_lm)

    # NOTE(review): mutates a module-level `stopwords` object in place —
    # this side effect persists across calls; confirm intended.
    stopwords.update([".", ",", "!", "?"])

    alpha = 0.1  # smoothing weight between claim LM and background LM

    html_visualizer = HtmlVisualizer("doc_lm_joined.html")

    def get_cell_from_token2(token, probs):
        # NOTE(review): defined but never used; the loop below calls
        # get_cell_from_token instead.
        if token.lower() in stopwords:
            probs = 0
        probs = probs * 1e5
        s = min(100, probs)
        c = Cell(token, s)
        return c

    tokenizer = PCTokenizer()
    for c in claims:
        q_res: List[SimpleRankedListEntry] = ranked_list[str(c['cId'])]
        html_visualizer.write_headline("{} : {}".format(c['cId'], c['text']))

        clusters: List[List[int]] = gold[c['cId']]

        # Show the gold perspective texts for this claim.
        for cluster in clusters:
            html_visualizer.write_paragraph("---")
            p_text_list: List[str] = lmap(perspective_getter, cluster)
            for text in p_text_list:
                html_visualizer.write_paragraph(text)
            html_visualizer.write_paragraph("---")
        claim_lm = claim_lms_d[c['cId']]
        topic_lm_prob = smooth(claim_lm.LM, bg_lm, alpha)
        log_topic_lm = get_lm_log(smooth(claim_lm.LM, bg_lm, alpha))
        # Per-term log-odds of topic LM vs. background LM.
        log_odd: Counter = subtract(log_topic_lm, log_bg_lm)

        s = "\t".join(left(log_odd.most_common(30)))
        html_visualizer.write_paragraph("Log odd top: " + s)
        not_found = set()

        def get_log_odd(x):
            # Stem first; log_odd is a Counter, so misses return 0.
            x = tokenizer.stemmer.stem(x)
            if x not in log_odd:
                not_found.add(x)
            return log_odd[x]

        def get_probs(x):
            # NOTE(review): defined but never called in this function.
            x = tokenizer.stemmer.stem(x)
            if x not in topic_lm_prob:
                not_found.add(x)
            return topic_lm_prob[x]

        for i in range(top_n):
            try:
                doc = load_doc(q_res[i].doc_id)
                cells = lmap(lambda x: get_cell_from_token(x, get_log_odd(x)),
                             doc)
                html_visualizer.write_headline("Doc rank {}".format(i))
                html_visualizer.multirow_print(cells, width=20)
            except KeyError:
                # Document not available in the preloaded store; skip it.
                pass
        html_visualizer.write_paragraph("Not found: {}".format(not_found))
Esempio n. 9
0
def show_analyzed_html(analyzed_failture_cases: List[AnalyzedCase]):
    """Render contradiction analyses as token-highlighted HTML.

    For each analyzed case, reports how many sentence pairs are predicted
    contradictory (in gold and predicted groups) and, for each
    contradictory pair, writes both sentences with per-token scores to
    ca_contradiction_tokens.html.
    """
    tokenizer = get_tokenizer()
    html = HtmlVisualizer("ca_contradiction_tokens.html")

    def get_token_scored_cells(sent1, sent2, token_score):
        """Split a flat token-score vector back into per-sentence cells.

        Layout assumed: [CLS] tokens1 [SEP] tokens2 — hence the 1 and
        2 + len(tokens1) offsets. TODO confirm against the scorer.
        """
        tokens1 = tokenizer.tokenize(sent1)
        tokens2 = tokenizer.tokenize(sent2)
        print(token_score)

        score_for_1 = token_score[1:1 + len(tokens1)]
        score_for_2 = token_score[2 + len(tokens1):2 + len(tokens1) +
                                  len(tokens2)]

        assert len(tokens1) == len(score_for_1)
        assert len(tokens2) == len(score_for_2)

        def get_cells(tokens, scores):
            # Normalize so the max score maps to a highlight of 100.
            cap = max(max(scores), 1)
            factor = 100 / cap

            def normalize_score(s):
                return min(s * factor, 100)

            return list(
                [Cell(t, normalize_score(s)) for t, s in zip(tokens, scores)])

        cells1 = get_cells(tokens1, score_for_1)
        cells2 = get_cells(tokens2, score_for_2)
        return cells1, cells2

    def print_scored_sentences(scores):
        # Entries come in forward/backward pairs at even/odd indices.
        for i, _ in enumerate(scores):
            if i % 2 == 0:
                sent1, sent2, score1, token_score1 = scores[i]
                _, _, score2, token_score2 = scores[i + 1]
                if is_cont(score1):
                    cells1, cells2 = get_token_scored_cells(
                        sent1, sent2, token_score1)
                    html.write_paragraph(
                        "Forward, P(Contradiction) = {}".format(score1[2]))
                    html.write_table([cells1])
                    html.write_table([cells2])

                if is_cont(score2):
                    cells1, cells2 = get_token_scored_cells(
                        sent2, sent1, token_score2)
                    html.write_paragraph(
                        "Backward, P(Contradiction) = {}".format(score2[2]))
                    html.write_table([cells1])
                    html.write_table([cells2])

    def print_analyzed_case(analyzed_case: AnalyzedCase):
        def print_part(score_list):
            cnt = count_cont(score_list)
            s = "{} of {}".format(cnt, len(score_list))
            html.write_paragraph(s)
            print_scored_sentences(score_list)

        html.write_paragraph("Gold")
        print_part(analyzed_case.score_g)
        html.write_paragraph("Pred")
        print_part(analyzed_case.score_p)

    def is_cont(scores):
        # Class index 2 == contradiction.
        return np.argmax(scores) == 2

    def is_cont_strict(scores):
        # NOTE(review): unused stricter variant.
        return scores[2] > 0.9

    def count_cont(result_list):
        num_cont = sum(
            [1 for _, _, scores, _ in result_list if is_cont(scores)])
        # Bug fix: the original never returned num_cont, so print_part
        # rendered "None of N".
        return num_cont

    for idx, dp in enumerate(analyzed_failture_cases):
        html.write_headline("Data point {}".format(idx))
        html.write_paragraph("------------")
        print_analyzed_case(dp)
Esempio n. 10
0
def show_all(run_name, data_id):
    """Render NLI explanation scores to HTML and LaTeX, keeping the first
    num_select instances per predicted class.

    Returns the selected instances grouped by predicted class
    (entailment / neutral / contradiction).
    """
    num_tags = 3
    num_select = 1
    pickle_name = "save_view_{}_{}".format(run_name, data_id)
    tokenizer = get_tokenizer()

    data_loader = get_modified_data_loader2(HPSENLI3(), NLIExTrainConfig())

    explain_entries = load_from_pickle(pickle_name)
    explain_entries = explain_entries

    visualizer = HtmlVisualizer(pickle_name + ".html")
    tex_visulizer = TexTableNLIVisualizer(pickle_name + ".tex")
    tex_visulizer.begin_table()
    selected_instances = [[], [], []]  # one bucket per predicted class
    for idx, entry in enumerate(explain_entries):
        x0, logits, scores = entry

        pred = np.argmax(logits)
        input_ids = x0
        # NOTE(review): passing input_ids twice looks odd — confirm the
        # expected (values, input_ids) argument pair.
        p, h = data_loader.split_p_h_with_input_ids(input_ids, input_ids)
        p_tokens = tokenizer.convert_ids_to_tokens(p)
        h_tokens = tokenizer.convert_ids_to_tokens(h)

        p_rows = []
        h_rows = []
        p_rows.append(cells_from_tokens(p_tokens))
        h_rows.append(cells_from_tokens(h_tokens))

        p_score_list = []
        h_score_list = []
        for j in range(num_tags):
            tag_name = data_generator.NLI.nli_info.tags[j]
            p_score, h_score = data_loader.split_p_h_with_input_ids(scores[j], input_ids)
            normalize_fn = normalize

            # Only render the tag that matches the predicted class.
            # Bug fix: pred is an int from np.argmax, but the original
            # compared it to the strings "0"/"1"/"2", which never matched,
            # so every tag was always rendered.
            add = True
            if pred == 0:
                add = tag_name == "match"
            if pred == 1:
                add = tag_name == "mismatch"
            if pred == 2:
                add = tag_name == "conflict"

            def format_scores(raw_scores):
                def format_float(s):
                    return "{0:.2f}".format(s)

                norm_scores = normalize_fn(raw_scores)

                cells = [Cell(format_float(s1), s2, False, False) for s1, s2 in zip(raw_scores, norm_scores)]
                if tag_name == "mismatch":
                    set_cells_color(cells, "G")
                elif tag_name == "conflict":
                    set_cells_color(cells, "R")
                return cells

            if add:
                p_rows.append(format_scores(p_score))
                h_rows.append(format_scores(h_score))

            p_score_list.append(p_score)
            h_score_list.append(h_score)

        pred_str = ["Entailment", "Neutral" , "Contradiction"][pred]

        out_entry = pred_str, p_tokens, h_tokens, p_score_list, h_score_list

        # Keep at most num_select instances per predicted class.
        if len(selected_instances[pred]) < num_select :
            selected_instances[pred].append(out_entry)
            visualizer.write_headline(pred_str)
            visualizer.multirow_print_from_cells_list(p_rows)
            visualizer.multirow_print_from_cells_list(h_rows)
            visualizer.write_instance(pred_str, p_rows, h_rows)

            tex_visulizer.write_paragraph(str(pred))
            tex_visulizer.multirow_print_from_cells_list(p_rows, width=13)
            tex_visulizer.multirow_print_from_cells_list(h_rows, width=13)

        # Stop once every class bucket is full.
        if all([len(s) == num_select for s in selected_instances]):
            break

    tex_visulizer.close_table()
    return selected_instances
Esempio n. 11
0
def load_bert_like():
    """Inspect positional attention patterns of a BERT-like model.

    Runs the model once, then for each layer tabulates average attention
    probability by (relative offset, head) and attention to the first
    token, writing the tables to position.html.
    """
    disable_eager_execution()
    model = BertLike()
    sess = init_session()
    #sess.run(tf.compat.v1.global_variables_initializer())
    load_v2_to_v2(sess, get_bert_full_path(), False)

    attention_prob_list, = sess.run([model.attention_probs_list])
    html = HtmlVisualizer("position.html")

    for layer_no, attention_prob in enumerate(attention_prob_list):
        html.write_headline("Layer {}".format(layer_no))
        # (offset, head_idx) -> list of attention probabilities.
        acc_dict = {}

        # Attention mass going to position 0, collected per head.
        zero_scores = [list() for _ in range(12)]

        # Sample source positions 2..38 (even), target positions 0..19.
        for loc in range(2, 40, 2):
            print("Source : ", loc)
            for target_loc in range(20):
                offset = target_loc - loc  # relative position of the target

                print(offset, end=" ")
                # NOTE(review): num_head is not defined in this function —
                # presumably a module-level constant (12, matching the
                # hard-coded head counts below); confirm.
                for head_idx in range(num_head):
                    key = offset, head_idx
                    if key not in acc_dict:
                        acc_dict[key] = []
                    e = attention_prob[0, head_idx, loc, target_loc]
                    if target_loc != 0:
                        acc_dict[key].append(e)
                    else:
                        # Attention to the first token is tracked separately.
                        zero_scores[head_idx].append(e)
                    print("{0:.2f}".format(e * 100), end=" ")
                print()

        rows = [[Cell("Loc")] + [Cell("Head{}".format(i)) for i in range(12)]]
        for offset in range(-7, +7):
            print(offset, end=" ")
            scores = []
            for head_idx in range(12):
                key = offset, head_idx

                try:
                    elems = acc_dict[key]
                    if len(elems) < 3:
                        # Too few samples to average reliably.
                        raise KeyError

                    avg = average(elems)
                    scores.append(avg)
                    print("{0:.2f}".format(avg * 100), end=" ")
                except KeyError:
                    # NOTE(review): skipped heads leave `scores` short, so
                    # this row has fewer cells than the 12-head header and
                    # later cells shift left — confirm intended.
                    print("SKIP")
            print()
            rows.append([Cell(offset)] +
                        [Cell(float(v * 100), v * 1000) for v in scores])
        html.write_table(rows)

        html.write_paragraph("Attention to first token")
        zero_scores = [average(l) for l in zero_scores]
        rows = [[Cell("   ")] + [Cell("Head{}".format(i)) for i in range(12)],
                [Cell("   ")] +
                [Cell(float(v * 100), v * 1000) for v in zero_scores]]
        html.write_table(rows)
Esempio n. 12
0
def pred_loss_view():
    """Visualize predicted vs. observed masked-LM loss differences.

    Loads pickled per-batch prediction vectors, concatenates them, and for
    each instance writes (a) the tokens with masked positions highlighted
    by prediction agreement and (b) a per-position table of probabilities
    and diffs to pred_make_sense_dev.html.
    """
    tokenizer = get_tokenizer()
    filename = "tlm_loss_pred.pickle"
    # NOTE(review): the line above is dead — immediately overridden with
    # the dev-set file.
    filename = "tlm_loss_pred_on_dev.pickle"
    p = os.path.join(output_path, filename)
    data = pickle.load(open(p, "rb"))

    batch_size, seq_length = data[0]['input_ids'].shape

    keys = list(data[0].keys())
    vectors = {}

    # Gather each key's arrays across batches, then concatenate.
    for e in data:
        for key in keys:
            if key not in vectors:
                vectors[key] = []
            vectors[key].append(e[key])

    for key in keys:
        vectors[key] = np.concatenate(vectors[key], axis=0)

    html_writer = HtmlVisualizer("pred_make_sense_dev.html", dark_mode=False)

    n_instance = len(vectors['input_ids'])
    n_instance = min(n_instance, 100)  # cap rendered instances
    for inst_idx in range(n_instance):
        tokens = tokenizer.convert_ids_to_tokens(
            vectors['input_ids'][inst_idx])
        locations = list(vectors['masked_lm_positions'][inst_idx])

        def is_dependent(token):
            # Single non-alphanumeric char (punctuation) attaches to its
            # neighbor without a space.
            return len(token) == 1 and not token[0].isalnum()

        cells = []
        for i in range(len(tokens)):
            f_same_pred = False
            score = 0
            if i in locations and i != 0:
                # Masked position: tag the token with its mask index and
                # color by whether prediction agrees with the observation.
                i_idx = locations.index(i)
                tokens[i] = "[{}:{}]".format(i_idx, tokens[i])
                pred_diff = vectors['pred_diff'][inst_idx][i_idx]
                gold_diff = vectors['gold_diff'][inst_idx][i_idx]
                # 0.3 is the binarization threshold for a "big" diff.
                pred_label = pred_diff > 0.3
                gold_label = gold_diff > 0.3
                if pred_label:
                    score = 100
                    if gold_label:
                        f_same_pred = True
                else:
                    if gold_label:
                        score = 30
                        f_same_pred = False

            if tokens[i] == "[SEP]":
                tokens[i] = "[SEP]<br>"

            if tokens[i] != "[PAD]":
                term = tokens[i]
                cont_left = term[:2] == "##"
                cont_right = i + 1 < len(tokens) and tokens[i + 1][:2] == "##"
                if i + 1 < len(tokens):
                    dependent_right = is_dependent(tokens[i + 1])
                else:
                    dependent_right = False

                dependent_left = is_dependent(tokens[i])

                if cont_left:
                    term = term[2:]  # strip the wordpiece marker for display

                space_left = "&nbsp;" if not (cont_left
                                              or dependent_left) else ""
                space_right = "&nbsp;" if not (cont_right
                                               or dependent_right) else ""

                # NOTE(review): other call sites pass booleans for the
                # space-flag positions of Cell; here "&nbsp;"/"" strings are
                # passed — confirm Cell accepts both.
                if f_same_pred:
                    cells.append(Cell(term, score, space_left, space_right))
                else:
                    # Disagreement rendered in red.
                    cells.append(
                        Cell(term,
                             score,
                             space_left,
                             space_right,
                             target_color="R"))

        row = []
        for cell in cells:
            row.append(cell)
            if len(row) == 20:
                html_writer.write_table([row])
                row = []
        # NOTE(review): a trailing partial row (< 20 cells) is never
        # written — confirm this truncation is intended.

        row_head = [
            Cell("Index"),
            Cell("P]Prob1"),
            Cell("P]Prob2"),
            Cell("G]Prob1"),
            Cell("G]Prob2"),
            Cell("P]Diff"),
            Cell("G]Diff"),
        ]

        def f_cell(obj):
            # Two-decimal fixed-width cell for probabilities/diffs.
            return Cell("{:04.2f}".format(obj))

        rows = [row_head]
        pred_diff_list = []
        gold_diff_list = []
        for idx, pos in enumerate(locations):
            if pos == 0:
                # Position 0 marks padding in masked_lm_positions.
                break
            pred_diff = vectors['pred_diff'][inst_idx][idx]
            gold_diff = vectors['gold_diff'][inst_idx][idx]
            pred_diff_list.append(pred_diff)
            gold_diff_list.append(gold_diff)

            row = [
                Cell(idx),
                f_cell(vectors['prob1'][inst_idx][idx]),
                f_cell(vectors['prob2'][inst_idx][idx]),
                # Observed probabilities recovered from losses: exp(-loss).
                f_cell(math.exp(-vectors['loss_base'][inst_idx][idx])),
                f_cell(math.exp(-vectors['loss_target'][inst_idx][idx])),
                f_cell(pred_diff),
                f_cell(gold_diff),
            ]
            rows.append(row)

        html_writer.write_table(rows)

        pred_diff = np.average(pred_diff_list)
        gold_diff = np.average(gold_diff_list)
        html_writer.write_paragraph(
            "Average Pred diff ={:04.2f} Observed diff={:04.2f} ".format(
                pred_diff, gold_diff))

        if pred_diff > 0.3:
            html_writer.write_headline("High Drop")
        elif pred_diff < 0.1:
            html_writer.write_headline("Low Drop")
Esempio n. 13
0
def do():
    """Visualize per-token probabilities (prob1/prob2) for two
    prediction-pickle / tfrecord pairs, and fit a degree-4 polynomial
    regression of prob2 on prob1 over a random sample of tokens.

    Side effects: writes one HTML report per (pickle, record, html) entry
    in ``todo`` and shows a matplotlib scatter plot with the fitted
    regression curve for each.
    """
    todo = [
        ("RLPP_0.pickle", "C:\\work\\Code\\Chair\\output\\unmasked_pair_x3_0",
         "RLPP_wiki.html"),
        ("ukp_rel.pickle", "C:\\work\\Code\\Chair\\output\\tf_enc",
         "RLPP_ukp.html")
    ]
    # Accumulators across all pairs: x holds single-feature rows [p1],
    # y holds the matching p2 targets (sklearn's expected shapes).
    x = []
    y = []
    for pred_file_name, record_file_name, out_name in todo:
        viewer = EstimatorPredictionViewerGosford(pred_file_name)
        html = HtmlVisualizer(out_name)
        itr1 = load_record_v2(record_file_name)
        itr2 = viewer.__iter__()
        cnt = 0
        for features, entry in zip(itr1, itr2):
            cnt += 1
            if cnt > 200:
                break  # cap the report at 200 instances per pair
            input_ids1 = entry.get_tokens("input_ids")
            prob1 = entry.get_vector("prob1")
            prob2 = entry.get_vector("prob2")

            cells = viewer.cells_from_tokens(input_ids1)
            p1_l = []
            p2_l = []
            useful_l = []

            # Four parallel display rows: token cell, p1, p2, usefulness.
            row1 = []
            row2 = []
            row3 = []
            row4 = []
            for j, cell in enumerate(cells):
                p1 = float(prob1[j])
                p2 = float(prob2[j])
                x.append([p1])
                y.append(p2)
                u = useful(p1, p2)
                score = (1 - u) * 100
                cell.highlight_score = score
                row1.append(cell)
                row2.append(Cell(p1, score))
                row3.append(Cell(p2, score))
                row4.append(Cell(u, score))

                p1_l.append(p1)
                p2_l.append(p2)
                useful_l.append(u)
                # Flush a 20-column chunk.  The original used "> 20",
                # which emitted 21-wide rows, inconsistent with the
                # "== 20" convention used elsewhere in this file.
                if len(row1) == 20:
                    html.write_table([row1, row2, row3, row4])
                    row1 = []
                    row2 = []
                    row3 = []
                    row4 = []

            # Bug fix: flush the trailing partial chunk, which the
            # original silently dropped.
            if row1:
                html.write_table([row1, row2, row3, row4])

            html.write_paragraph("p1: {}".format(average(p1_l)))
            html.write_paragraph("p2: {}".format(average(p2_l)))
            html.write_paragraph("useful: {}".format(average(useful_l)))

            if average(useful_l) < 0.4:
                html.write_headline("Low Score")

        # Bug fix: the original did "x, y = zip(*l)", rebinding the list
        # accumulators to tuples, so x.append(...) raised AttributeError
        # on the second todo entry.  Sample into locals instead and leave
        # the accumulators untouched.
        sample = list(zip(x, y))
        random.shuffle(sample)
        sample = sample[:1000]
        xs, ys = zip(*sample)
        lin = LinearRegression()
        lin.fit(xs, ys)

        poly = PolynomialFeatures(degree=4)
        X_poly = poly.fit_transform(xs)
        poly.fit(X_poly, ys)
        lin2 = LinearRegression()
        lin2.fit(X_poly, ys)
        plt.scatter(xs, ys, color='blue')

        plt.plot(xs, lin2.predict(poly.fit_transform(xs)), color='red')
        plt.title('Polynomial Regression')

        plt.show()
# ---- Example 14 ----
def doit(filename: str):
    """Render per-token priority scores from a prediction pickle as HTML.

    For the first 10 prediction instances, converts each instance's
    ``priority_score`` vector into sampling probabilities (via
    ``probabilty(scores, amp)``), colors tokens by a capped/normalized
    version of those probabilities, and writes five aligned rows per
    instance: token cells, raw scores, probability strings, and the
    per-token probabilities derived from the two LM losses.

    The inner helpers ``get_score``/``get_scores_lin`` implement the same
    per-bin z-score normalization (scalar vs. vectorized); they are unused
    on the live path and kept only for the commented-out consistency
    check below.
    """
    name = filename.split(".")[0]
    # Per-bin statistics keyed by (bin_start, bin_end) intervals; bin_fn
    # maps a probability to its bin key.  NOTE(review): exact semantics
    # depend on statistics_tlm(), defined elsewhere — confirm.
    bin_fn, mean_d, std_d = statistics_tlm()

    def get_score(p1, p2):
        # Z-score of min(p1, p2) within the bin selected by p1.
        key = bin_fn(p1)
        v = min(p2, p1)
        return (v - mean_d[key]) / std_d[key]

    # Unpack the per-bin dict into parallel arrays so the vectorized
    # variant below can select bins with broadcasted comparisons.
    st_list = []
    ed_list = []
    std_list = []
    mean_list = []
    for key in mean_d:
        st, ed = key
        st_list.append(st)
        ed_list.append(ed)
        std_list.append(std_d[key])
        mean_list.append(mean_d[key])

    # Shape (1, num_bins) so they broadcast against (N, 1) prob columns.
    mean_list = np.expand_dims(np.array(mean_list), 0)
    std_list = np.expand_dims(np.array(std_list), 0)
    st_list = np.expand_dims(np.array(st_list), 0)
    ed_list = np.expand_dims(np.array(ed_list), 0)

    def get_scores_lin(prob1_list, prob2_list):
        # Vectorized get_score: elementwise min of the two prob vectors,
        # z-scored against every bin, then masked to the single bin whose
        # [st, ed) interval contains prob1.
        v2 = np.min(np.stack([prob1_list, prob2_list], axis=1), axis=1)
        v2 = np.expand_dims(v2, 1)
        all_scores = (v2 - mean_list) / std_list
        prob1_list = np.expand_dims(prob1_list, 1)
        f1 = np.less_equal(st_list, prob1_list)
        f2 = np.less(prob1_list, ed_list)
        f = np.logical_and(f1, f2)
        # Zero out every bin except the matching one, then collapse.
        all_scores = all_scores * f
        scores = np.sum(all_scores, axis=1)
        return scores

    data = EstimatorPredictionViewerGosford(filename)
    amp = 0.5  # sampling-temperature amplifier; also embedded in the output name
    html_writer = HtmlVisualizer("{}_{}.html".format(name, amp), dark_mode=False)

    for inst_i, entry in enumerate(data):
        if inst_i > 10:
            break  # only visualize the first few instances
        tokens = entry.get_mask_resolved_input_mask_with_input()
        scores = entry.get_vector("priority_score")
        loss1 = entry.get_vector("lm_loss1")
        loss2 = entry.get_vector("lm_loss2")
        #scores1 = get_scores_lin(loss_to_prob(loss1), loss_to_prob(loss2))
        #scores = [get_score(v1, v2) for v1,v2 in zip(loss_to_prob(loss1), loss_to_prob(loss2))]
        #assert np.all(np.less(np.abs(scores - scores1), 0.01))

        prob_scores = probabilty(scores, amp)
        prob_strs = ["{:06.6f}".format(v*1000) for v in prob_scores]

        def normalize(prob):
            # Map a probability to a 0-100 highlight intensity, capped at 100.
            # 0-> Good
            # -1 -> Bad
            return min(prob * 10000, 100)

        # Five aligned rows, all colored by the same normalized scores.
        norm_scores = lmap(normalize, prob_scores)
        cells = data.cells_from_tokens(tokens, norm_scores)
        cells2 = data.cells_from_anything(scores, norm_scores)
        cells3 = data.cells_from_anything(prob_strs, norm_scores)
        cells4 = data.cells_from_anything(loss_to_prob(loss1), norm_scores)
        cells5 = data.cells_from_anything(loss_to_prob(loss2), norm_scores)
        html_writer.multirow_print_from_cells_list([cells, cells2, cells3, cells4, cells5])
        html_writer.write_headline("")