# Example 1
def return_summary(request: Request,
                   input_text: str = Form(...),
                   summary_len: int = Form(150),
                   beams: int = Form(2)):
    """Summarize the submitted text and render the index page with the result.

    Form fields: the text to summarize, a target summary length (default 150)
    and the number of beams for beam search (default 2). The globals `model`
    and `tokenizer` are assumed to be loaded at module import time.
    """
    result = generate_summary(input_text, summary_len, beams, model, tokenizer)
    ctx = {'request': request, 'summary': result, 'text': input_text}
    return templates.TemplateResponse('index.html', context=ctx)
# Example 2
def get_summary():
    """ Returns a summary of an article found at a given url """
    # Parse url from form response
    # NOTE(review): `request` here is presumably the Flask global request —
    # confirm against the file's imports (not visible in this chunk).
    req = request.form.to_dict()
    # 'Memory' is a JSON blob carried in the webhook form payload.
    memory = json.loads(req['Memory'])
    # Drill into the Twilio collected-data structure for the user's article URL.
    url = memory['twilio']['collected_data']['summarize_article']['answers']['article_url']['answer']

    # Parse article text
    text = parse_article(url)

    # Generate summary
    summary = generate_summary(text)

    # Return summary
    # Response shape: a Twilio "actions" list that speaks the summary back.
    actions = {
        "actions": [
            {
                "say": summary
            }
        ]
    }
    return actions
    # NOTE(review): everything below is unreachable — it follows the return
    # above. It looks like a separate evaluation script (TVSum video-summary
    # scoring) that was pasted into this function body; names such as `keys`,
    # `all_user_summary`, `all_shot_bound`, `all_scores`, `eval_method` and
    # `f_score_epochs` are never defined in this function. Kept verbatim.
    with h5py.File(PATH_TVSum, 'r') as hdf:
        for video_name in keys:
            # Keys look like 'video_<idx>'; strip the 'video_' prefix.
            video_index = video_name[6:]

            user_summary = np.array(
                hdf.get('video_' + video_index + '/user_summary'))
            sb = np.array(hdf.get('video_' + video_index + '/change_points'))
            n_frames = np.array(hdf.get('video_' + video_index + '/n_frames'))
            positions = np.array(hdf.get('video_' + video_index + '/picks'))

            all_user_summary.append(user_summary)
            all_shot_bound.append(sb)
            all_nframes.append(n_frames)
            all_positions.append(positions)

    all_summaries = generate_summary(all_shot_bound, all_scores, all_nframes,
                                     all_positions)

    all_f_scores = []
    # compare the resulting summary with the ground truth one, for each video
    for video_index in range(len(all_summaries)):
        summary = all_summaries[video_index]
        user_summary = all_user_summary[video_index]
        f_score = evaluate_summary(summary, user_summary, eval_method)
        all_f_scores.append(f_score)

    f_score_epochs.append(np.mean(all_f_scores))
    print("f_score: ", np.mean(all_f_scores))

# Persist the accumulated per-epoch F-scores as JSON alongside the outputs.
with open(path + '/f_scores.txt', 'w') as fp:
    json.dump(f_score_epochs, fp)
# Example 4
import generate_summary as gs

if __name__ == "__main__":
    # Ask the user for the desired summary length (in sentences), then
    # summarize the bundled input file with that budget.
    sentence_count = int(input("Enter the number of sentences you want in the summary:"))
    gs.generate_summary("input.txt", sentence_count)
# Example 5
    else:
        if args.model_path == "":
            raise FileNotFoundError

        model = TransformerSummarizer(ATTENTION_HEADS, N_LAYERS, N_LAYERS, DIM_FEEDFORWARD, \
                                        SEQ_LEN, VOCAB_SIZE, PAD_IDX, src_list, d_model=D_MODEL).to(device)
        model.load_state_dict(torch.load(args.model_path, map_location=device))

    Path.mkdir(out_dir, parents=True, exist_ok=True)

    with open(os.path.join(out_dir, "raw.txt"), "w", encoding="utf-8") as text, \
            open(os.path.join(out_dir, "pred.txt"), "w", encoding="utf-8") as pred, \
                open(os.path.join(out_dir, "true.txt"), "w", encoding="utf-8") as true:

        for data in tqdm(test_data, total=TEST_SIZE):

            src_text = data.text
            trg_text = data.summ

            raw_text = " ".join(src_text)
            true_summary = " ".join(trg_text)
            prediction = generate_summary(raw_text, model, src_list, src_dict,
                                          MAX_LENGTH)

            text.write(raw_text + "\n")
            text.flush()
            true.write(true_summary + "\n")
            true.flush()
            pred.write(prediction + "\n")
            pred.flush()