Example #1
import json
import pprint
import sys

from pytext import create_predictor


def predict(context, exported_model):
    """Start a REPL executing examples against a caffe2 model."""
    config = context.obj.load_config()
    print(f"Loading model from {exported_model or config.export_caffe2_path}")
    predictor = create_predictor(config, exported_model)

    print("Model loaded, reading example JSON from stdin")
    for line in sys.stdin.readlines():
        example = json.loads(line)
        predictions = predictor(example)
        pprint.pprint(predictions)
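Each line read from stdin must be a JSON object mapping feature names to values; the exact names depend on the model's configuration. One input line might look like this (the "text" feature name is an assumption, not something the snippet above fixes):

{"text": "flights from boston to seattle"}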
Example #2
import sys
import flask
import pytext

config_file = sys.argv[1]
model_file = sys.argv[2]

config = pytext.load_config(config_file)
predictor = pytext.create_predictor(config, model_file)

app = flask.Flask(__name__)

@app.route('/get_flight_info', methods=['GET', 'POST'])
def get_flight_info():
    text = flask.request.data.decode()

    # Pass the inputs to PyText's prediction API
    result = predictor({"raw_text": text})

    # The result maps output blob names to their scores.
    # The blob names are different for joint models vs doc models;
    # since this tutorial covers both, check which one is present.
    doc_label_scores_prefix = (
        'scores:' if any(r.startswith('scores:') for r in result)
        else 'doc_scores:'
    )

    # For now let's just output the top document label!
    best_doc_label = max(
        (label for label in result if label.startswith(doc_label_scores_prefix)),
        key=lambda label: result[label][0],
        # Strip the doc label prefix here
    )[len(doc_label_scores_prefix):]
    return flask.jsonify({"label": best_doc_label})

app.run(host='0.0.0.0', port='8080')
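With the server running, the endpoint can be exercised from any HTTP client. A minimal client sketch using the requests library (host and port are assumptions matching the app.run call above):

import requests

# The handler above reads the raw request body via flask.request.data,
# so send the utterance as plain data rather than JSON.
response = requests.post(
    'http://localhost:8080/get_flight_info',
    data='flights from boston to seattle',
)
print(response.text)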
Example #3
import sys
import pytext

config_file = sys.argv[1]
model_file = sys.argv[2]

config = pytext.load_config(config_file)
predictor = pytext.create_predictor(config, model_file)

text = input('\nPlease enter the text\n')
result = predictor({"raw_text": text})

doc_label_scores_prefix = (
    'scores:' if any(r.startswith('scores:') for r in result)
    else 'doc_scores:'
)

# For now let's just output the top document label!
best_doc_label = max(
    (label for label in result if label.startswith(doc_label_scores_prefix)),
    key=lambda label: result[label][0],
    # Strip the doc label prefix here
)[len(doc_label_scores_prefix):]
print(best_doc_label)
Example #4
import pytext

# fo and ul are project-local helper modules, and slot_prefix (the prefix
# of the word-level score blobs) is assumed to be defined elsewhere.

def get_best_slots(result):
    """For each word, pick the slot label with the best word-level score."""
    word_scores = {
        label.split(':')[1]: result[label]
        for label in result
        if label.startswith(slot_prefix)
    }
    num_words = len(next(iter(word_scores.values())))
    word_labels = []
    for i in range(num_words):
        # The label whose score for word i is lowest wins
        word_labels.append(min(word_scores, key=lambda label: word_scores[label][i]))
    return word_labels
    # Alternative: extract the top document label instead, as in the examples above:
    # doc_label_scores_prefix = ('scores:' if any(r.startswith('scores:') for r in result) else 'doc_scores:')
    # return max((label for label in result if label.startswith(doc_label_scores_prefix)), key=lambda label: result[label][0],)[len(doc_label_scores_prefix):]

configp = pytext.load_config(config['paths']['etc']['pytext']['model-config-extended'])
predictor = pytext.create_predictor(configp, config['paths']['etc']['pytext']['model-extended'])

test_dataset = fo.read_json(config['paths']['datasets']['pytext']['test-extended'])

counter = 0
positive_counter = 0
total_recall = []
#print(f"{'Sample':80s}\t{'recognized-label':20s}\t{'true-label':20s}\t{'correctly-recognized':30s}")
for label in test_dataset.keys():
    for sample in test_dataset[label]:
        recognized = get_best_slots(
            predictor({"text": sample["text"].lower(), "doc_weight": 1, "word_weight": 1})
        )  # optionally filter out '__UNKNOWN__' slots here
        parsed_command = list(zip(sample['text'].lower().split(' '), recognized))
        parsed_right_command = list(zip(sample['text'].lower().split(' '), sample['slots']))
        print(f"-- Recognized slots")
        ul.get_request_type(parsed_command, filename = config['paths']['datasets']['request-mapping']['requests'])
        print(f"-- Right slots")