Code example #1
0
def predict_json():
    """Serve model predictions for one document as a JSON response.

    POST: classify the form-submitted ``textInput`` text.
    Any other method: run on three built-in demo sentences.
    Only the first document's classification and tagging are serialized.
    """
    if request.method == "POST":
        text = request.form['textInput']
        data = [tokenize(text)]
    else:
        # Built-in demo sentences used when nothing was submitted.
        text = "Barack Obama went to Paris and never returned to the USA."
        text1 = "Stan Lee was a legend who developed Spiderman and the Avengers movie series."
        text2 = "I just learned about donald drumph through john oliver. #JohnOliverShow such an awesome show."
        data = [tokenize(sample) for sample in [text, text1, text2]]
    # Release cached GPU memory so a larger batch can be loaded for inference.
    torch.cuda.empty_cache()
    tokens = [entry["value"] for entry in data]
    output = list(
        get_model_output(model, tokens, args, readers, vocab, test_iterator))
    idx = 0  # only the first document is returned to the client
    classification_output_json = output_to_json(tokens[idx], output[idx],
                                                vocab)
    classification_output_json["text"] = text

    df = output_to_df(tokens[idx], output[idx], vocab)
    # Copy every tokenizer field except the raw token list into the frame.
    for key, value in data[idx].items():
        if key != "value":
            df[key] = value
    payload = {
        "classification": classification_output_json,
        "tagging": json.loads(df.to_json(orient='table')),
    }
    return app.response_class(response=json.dumps(payload, indent=2),
                              status=200,
                              mimetype='application/json')
Code example #2
0
def predict():
    """Render the classification page for a submitted or demo text.

    POST: classify the form-submitted ``textInput`` text.
    Any other method: run on three built-in demo sentences and show the first.
    """
    if request.method == "POST":
        text = request.form['textInput']
        data = [tokenize(text)]
    else:
        # Built-in demo sentences used when nothing was submitted.
        text = "Barack Obama went to Paris and never returned to the USA."
        text1 = "Stan Lee was a legend who developed Spiderman and the Avengers movie series."
        text2 = "I just learned about donald drumph through john oliver. #JohnOliverShow such an awesome show."
        data = [tokenize(sample) for sample in [text, text1, text2]]
    # Release cached GPU memory so a larger batch can be loaded for inference.
    torch.cuda.empty_cache()
    tokens = [entry["value"] for entry in data]
    output = list(
        get_model_output(model, tokens, args, readers, vocab, test_iterator))
    idx = 0  # render only the first document
    output_json = output_to_json(tokens[idx], output[idx], vocab)
    output_json["text"] = text
    return render_template("classification.html",
                           text=text,
                           output_json=output_json)
Code example #3
0
def predict_json(texts=None):
    """Run the model over *texts* and return one result dict per document.

    Each dict carries the source ``text``, its ``doc_idx``, and the model's
    JSON output for that document. When *texts* is empty or ``None``, three
    built-in demo sentences are used instead.
    """
    if not texts:
        # Built-in demo sentences used when no input was supplied.
        text = "Barack Obama went to Paris and never returned to the USA."
        text1 = "Stan Lee was a legend who developed Spiderman and the Avengers movie series."
        text2 = "I just learned about donald drumph through john oliver. #JohnOliverShow such an awesome show."
        texts = [text, text1, text2]
    data = [tokenize(doc) for doc in texts]
    # Release cached GPU memory so a larger batch can be loaded for inference.
    torch.cuda.empty_cache()
    tokens = [entry["value"] for entry in data]
    output = list(
        get_model_output(model, tokens, args, readers, vocab, test_iterator))

    return [
        dict(text=doc_text,
             doc_idx=i,
             **output_to_json(tokens[i], output[i], vocab))
        for i, doc_text in enumerate(texts)
    ]