Code example #1
# Note: assumes a Flask request context; EditExample, ret_model, edit_model,
# examples, and new_lsh are expected to be defined elsewhere in the module.
import json

from flask import request
from tqdm import tqdm


def edit():
    # Parse the comma-separated events from the POST form and tokenize each one.
    events = request.form['events']
    events = [event.split() for event in events.split(',')]

    # Wrap each tokenized event as an EditExample; '?' is the placeholder target.
    processedEvents = [EditExample([[event[0]], [event[1]], event[2:]], ['?'])
                       for event in tqdm(events)]
    
    # Retrieve nearest-neighbour training examples and decode edits for them.
    valid_eval = ret_model.ret_and_make_ex(processedEvents, new_lsh,
                                           examples.train, 0, train_mode=False)
    beam_list, edit_traces = edit_model.edit(valid_eval)

    # Baseline retriever: LSH over the training set, predicting the nearest
    # neighbour for each query (ret_pred is computed but not returned).
    import gtd.retrieval_func as rf
    lsh, lsh_dict = rf.make_hash(examples.train)
    output_index = rf.grab_nbs(processedEvents, lsh, lsh_dict)
    ret_pred = rf.generate_predictions(examples.train, output_index)

    ####
    # Evaluation: keep the top beam hypothesis for each example.
    gen_out = []
    for i in tqdm(range(len(edit_traces))):
        gen_out.append(beam_list[i][0])
        
    # Record the retrieval distance and top-candidate probability per example.
    dist = []
    prob = []

    for i in tqdm(range(len(edit_traces))):
        dist.append(str(valid_eval[i].dist))
        prob.append(str(edit_traces[i].decoder_trace.candidates[0].prob))

    output = [gen_out, dist, prob]
    return json.dumps(output)
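
A minimal sketch of how this handler could be wired up and called, assuming it is registered on a Flask app (the route, port, and example payload below are illustrative, not taken from the original code):

# Hypothetical wiring; the original snippet does not show the route registration.
from flask import Flask

app = Flask(__name__)
app.add_url_rule('/edit', 'edit', edit, methods=['POST'])

# Client side: POST a comma-separated list of space-tokenized events, e.g.
#   import requests
#   resp = requests.post('http://localhost:5000/edit',
#                        data={'events': '<PERSON>1 throw ball , <PERSON>2 catch ball'})
#   gen_out, dist, prob = json.loads(resp.text)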
Code example #2
def edit(events):

    # Fixed target sentence, tokenized; every event is paired with it.
    x = ("with enraged yells <PRP> repeatedly throws Synset('entity.n.01') at "
         "<ORGANIZATION>8 Synset('natural_phenomenon.n.01') that seals the "
         "Synset('action.n.01') startling <PERSON>14 the Synset('defender.n.01') "
         "on Synset('group_action.n.01')").split()

    processedEvents = [
        EditExample([[event[0]], [event[1]], event[2:]], x) for event in events
    ]

    print(processedEvents[0])

    valid_eval = ret_model.ret_and_make_ex(processedEvents,
                                           new_lsh,
                                           examples.train,
                                           0,
                                           train_mode=False)
    beam_list, edit_traces = edit_model.edit(valid_eval)

    # Baseline retriever, as in example #1.
    import gtd.retrieval_func as rf
    lsh, lsh_dict = rf.make_hash(examples.train)
    output_index = rf.grab_nbs(processedEvents, lsh, lsh_dict)
    ret_pred = rf.generate_predictions(examples.train, output_index)

    ####
    # eval code
    gen_out = []
    for i in range(len(edit_traces)):
        gen = beam_list[i][0]
        gen_out.append(gen)

    dist = []
    prob = []

    for i in range(len(edit_traces)):
        dist.append(str(valid_eval[i].dist))
        prob.append(str(edit_traces[i].decoder_trace.candidates[0].prob))

    output = {'output': gen_out, 'distances': dist, 'beamProb': prob}
    print(output)
    return output
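
A small usage sketch, assuming edit() is called directly with pre-tokenized events; the event tuples below are invented for illustration only:

# Hypothetical call; each event is [agent, verb, remaining tokens...].
events = [
    ['<PERSON>1', 'throw', 'ball', 'park'],
    ['<PERSON>2', 'catch', 'ball'],
]
result = edit(events)
# result is a dict: {'output': [...], 'distances': [...], 'beamProb': [...]}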
Code example #3
                all_ranks_noret[i].append(gold_rank)
        position += 1
    del token_list
    del vocab_probs
    return all_ranks_noret


# Collect gold-token ranks without retrieval, over 16-example batches of the test slice.
all_ranks_noret = []
for chunk in tqdm(chunks(examples.test[0:eval_num], 16), total=(eval_num + 15) // 16):
    all_ranks_noret.extend(eval_batch_noret(chunk))

###
# Baseline retriever over the test slice.
import gtd.retrieval_func as rf
lsh, lsh_dict = rf.make_hash(examples.train)
output_index = rf.grab_nbs(examples.test[0:eval_num], lsh, lsh_dict)
ret_pred = rf.generate_predictions(examples.train, output_index)


def agree_vec(ref, targ):
    # Per-position agreement: 0.0 where ref and targ tokens match,
    # 100.0 on a mismatch or wherever one sequence is shorter than the other.
    rank_vec = []
    for i in range(max(len(ref), len(targ))):
        if i < len(targ) and i < len(ref):
            agree_ind = ref[i] == targ[i]
            rank_vec.append((1.0 - agree_ind) * 100.0)
        else:
            rank_vec.append(100.0)
    return rank_vec
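
A quick check of agree_vec (the tokens here are made up): positions 0 and 2 match, position 1 differs, and position 3 is missing from the reference.

agree_vec(['he', 'throws', 'ball'], ['he', 'catches', 'ball', 'far'])
# -> [0.0, 100.0, 0.0, 100.0]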


all_ranks_ret_fixed = []