def predict():
    if request.method == "POST":
        text = request.form['textInput']
        data = [tokenize(text)]
    else:
        # Default demo sentences used when no text is posted.
        text = "Barack Obama went to Paris and never returned to the USA."
        text1 = "Stan Lee was a legend who developed Spiderman and the Avengers movie series."
        text2 = "I just learned about donald drumph through john oliver. #JohnOliverShow such an awesome show."
        data = [text, text1, text2]
        data = [tokenize(text) for text in data]
    # Empty the CUDA cache to ensure a larger batch can be loaded for testing.
    torch.cuda.empty_cache()
    tokens = [obj["value"] for obj in data]
    output = list(
        get_model_output(model, tokens, args, readers, vocab, test_iterator))
    # Only the first example is rendered in the HTML view.
    idx = 0
    df = output_to_df(tokens[idx], output[idx], vocab)
    # Copy any extra metadata produced by tokenize() into the DataFrame.
    for k in data[idx].keys():
        if k != "value":
            df[k] = data[idx][k]
    df = df.set_index("tokens")
    return render_template(
        'sequence_tagging.html',
        text=text,
        df_html=df.T.to_html(classes="table table-sm table-hover"))
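
# NOTE (illustrative, not from the original code): predict() assumes that
# tokenize() returns a dict whose "value" key holds the token list and whose
# remaining keys are per-example metadata that gets copied into the output
# DataFrame, e.g. something shaped roughly like:
#
#   {"value": ["Barack", "Obama", "went", "to", "Paris", ...],
#    "some_metadata_key": ...}
#
# The exact metadata keys depend on the project's tokenize() implementation.
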
def predict_json():
    if request.method == "POST":
        text = request.form['textInput']
        data = [tokenize(text)]
    else:
        # Default demo sentences used when no text is posted.
        text = "Barack Obama went to Paris and never returned to the USA."
        text1 = "Stan Lee was a legend who developed Spiderman and the Avengers movie series."
        text2 = "I just learned about donald drumph through john oliver. #JohnOliverShow such an awesome show."
        data = [text, text1, text2]
        data = [tokenize(text) for text in data]
    # Empty the CUDA cache to ensure a larger batch can be loaded for testing.
    torch.cuda.empty_cache()
    tokens = [obj["value"] for obj in data]
    output = list(
        get_model_output(model, tokens, args, readers, vocab, test_iterator))
    # Only the first example is returned as JSON.
    idx = 0
    df = output_to_df(tokens[idx], output[idx], vocab)
    # Copy any extra metadata produced by tokenize() into the DataFrame.
    for k in data[idx].keys():
        if k != "value":
            df[k] = data[idx][k]
    # df = df.set_index("tokens")
    output_json = df.to_json(orient='table')
    return app.response_class(response=output_json,
                              status=200,
                              mimetype='application/json')
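
# NOTE (illustrative, not from the original code): with orient='table',
# pandas serialises the DataFrame as a JSON object of the form
#
#   {"schema": {"fields": [...], "primaryKey": [...], "pandas_version": ...},
#    "data": [{<column>: <value>, ...}, ...]}
#
# so predict_json() returns one record per token; the actual column names
# come from output_to_df() and tokenize().
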
def predict_df(texts=None):
    if texts:
        data = [tokenize(text) for text in texts]
    else:
        # Default demo sentences used when no texts are passed in.
        text = "Barack Obama went to Paris and never returned to the USA."
        text1 = "Stan Lee was a legend who developed Spiderman and the Avengers movie series."
        text2 = "I just learned about donald drumph through john oliver. #JohnOliverShow such an awesome show."
        texts = [text, text1, text2]
        data = [tokenize(text) for text in texts]
    # Empty the CUDA cache to ensure a larger batch can be loaded for testing.
    torch.cuda.empty_cache()
    tokens = [obj["value"] for obj in data]
    output = list(
        get_model_output(model, tokens, args, readers, vocab, test_iterator))
    idx = 0

    def _get_data_values(d):
        # Extra metadata from tokenize(), i.e. every key except the tokens themselves.
        return {k: d[k] for k in d.keys() if k != "value"}

    # Build one DataFrame per example, attach its metadata and example index,
    # then stack all examples into a single frame.
    # df = output_to_df(tokens[idx], output[idx], vocab)
    df = pd.concat([
        output_to_df(tokens[i], output[i], vocab)
        .assign(**_get_data_values(d))
        .assign(data_idx=i)
        for i, d in enumerate(data)
    ])
    # for k in data[idx].keys():
    #     if k != "value":
    #         df[k] = data[idx][k]
    return df
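
# Example usage (a sketch; assumes the module-level model, args, readers,
# vocab and test_iterator used above are already initialised, and that
# output_to_df() emits a "tokens" column as in predict()):
#
#   df = predict_df(["Tim Cook visited Berlin last week."])
#   print(df[["tokens", "data_idx"]])
#
# Each row is one token; data_idx maps the row back to the input text it
# came from when several texts are passed in.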