Example #1
def answer():
    print('Answering mode')
    # Pull the spoken question out of the Alexa intent slot
    alexa_question = ask_request.intent.slots.question.value
    print(alexa_question)
    # Classify the utterance and normalise it for the QA model
    text_type, new_text = context_parser(alexa_question)
    print('Text type', text_type)
    print('Global context', global_context)
    print('New text', new_text)
    # Build a SQuAD-style JSON payload from the stored context and the parsed question
    new_json = generate_multiple_json(version=1,
                                      title="INSOFE",
                                      context=global_context_list,
                                      questions_list=new_text)
    print(new_json)
    # 'Q' indicates the utterance was classified as a question
    if text_type == 'Q':
        qn_uuid_data, context_token_data, qn_token_data = get_json_data(
            new_json)
        answers_dict = generate_answers_prob(sess, qa_model, word2id,
                                             qn_uuid_data, context_token_data,
                                             qn_token_data)
        # Sort the uuid -> (answer, probability) pairs by probability, highest first
        answers_dict = sorted(answers_dict.items(),
                              key=lambda e: e[1][1],
                              reverse=True)
        print(answers_dict, 'ad')
        # answer = answers_dict.values()[0][0].replace('insofe','insofee')
        # Take the top-scoring answer; 'insofe' is respelled, presumably so the
        # voice output pronounces the name correctly
        answer = answers_dict[0][1][0].replace('insofe', 'insofee')
    else:
        answer = "I'm not sure if I understand. Come again"
    return question(answer)
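The handler above appears to be written against flask-ask: ask_request, global_context and global_context_list are module-level state, and question() builds the Alexa response that keeps the session open. A minimal wiring sketch, assuming flask-ask; the route and the intent name are assumptions, not part of the original example:

from flask import Flask
from flask_ask import Ask, question, request as ask_request

app = Flask(__name__)
ask = Ask(app, '/alexa')  # route is an assumption


@ask.launch
def launch():
    # Greeting that keeps the session open so the user can ask a question
    return question("Ask me something about INSOFE.")


@ask.intent('AnswerIntent')  # intent name is an assumption; it must match the Alexa interaction model
def handle_question():
    return answer()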
Example #2
def main(unused_argv):
    # Raise an error if flags were entered incorrectly
    if len(unused_argv) != 1:
        raise Exception("There is a problem with how you entered flags: %s" %
                        unused_argv)

    # Check for Python 2
    if sys.version_info[0] != 2:
        raise Exception(
            "ERROR: You must use Python 2 but you are running Python %i" %
            sys.version_info[0])

    # Print out Tensorflow version
    print "This code was developed and tested on TensorFlow 1.4.1. Your TensorFlow version: %s" % tf.__version__

    # Define train_dir
    if not FLAGS.experiment_name and not FLAGS.train_dir and FLAGS.mode != "official_eval":
        raise Exception(
            "You need to specify either --experiment_name or --train_dir")
    FLAGS.train_dir = FLAGS.train_dir or os.path.join(EXPERIMENTS_DIR,
                                                      FLAGS.experiment_name)

    # Initialize bestmodel directory
    bestmodel_dir = os.path.join(FLAGS.train_dir, "best_checkpoint")

    # Define path for glove vecs
    FLAGS.glove_path = FLAGS.glove_path or os.path.join(
        DEFAULT_DATA_DIR, "glove.6B.{}d.txt".format(FLAGS.embedding_size))

    # Load embedding matrix and vocab mappings
    emb_matrix, word2id, id2word = get_glove(FLAGS.glove_path,
                                             FLAGS.embedding_size)

    # Get filepaths to train/dev datafiles for tokenized queries, contexts and answers
    train_context_path = os.path.join(FLAGS.data_dir, "train.context")
    train_qn_path = os.path.join(FLAGS.data_dir, "train.question")
    train_ans_path = os.path.join(FLAGS.data_dir, "train.span")
    dev_context_path = os.path.join(FLAGS.data_dir, "dev.context")
    dev_qn_path = os.path.join(FLAGS.data_dir, "dev.question")
    dev_ans_path = os.path.join(FLAGS.data_dir, "dev.span")

    # Initialize model
    qa_model = QAModel(FLAGS, id2word, word2id, emb_matrix)

    # Some GPU settings
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    # Split by mode
    if FLAGS.mode == "train":

        # Setup train dir and logfile
        if not os.path.exists(FLAGS.train_dir):
            os.makedirs(FLAGS.train_dir)
        file_handler = logging.FileHandler(
            os.path.join(FLAGS.train_dir, "log.txt"))
        logging.getLogger().addHandler(file_handler)

        # Save a record of flags as a .json file in train_dir
        with open(os.path.join(FLAGS.train_dir, "flags.json"), 'w') as fout:
            json.dump(FLAGS.__flags, fout)

        # Make bestmodel dir if necessary
        if not os.path.exists(bestmodel_dir):
            os.makedirs(bestmodel_dir)

        with tf.Session(config=config) as sess:

            # Load most recent model
            initialize_model(sess,
                             qa_model,
                             FLAGS.train_dir,
                             expect_exists=False)

            # Train
            qa_model.train(sess, train_context_path, train_qn_path,
                           train_ans_path, dev_qn_path, dev_context_path,
                           dev_ans_path)

    elif FLAGS.mode == "show_examples":
        with tf.Session(config=config) as sess:

            # Load best model
            initialize_model(sess, qa_model, bestmodel_dir, expect_exists=True)

            # Show examples with F1/EM scores
            _, _ = qa_model.check_f1_em(sess,
                                        dev_context_path,
                                        dev_qn_path,
                                        dev_ans_path,
                                        "dev",
                                        num_samples=10,
                                        print_to_screen=True)

    elif FLAGS.mode == "official_eval":
        if FLAGS.json_in_path == "":
            raise Exception(
                "For official_eval mode, you need to specify --json_in_path")
        if FLAGS.ckpt_load_dir == "":
            raise Exception(
                "For official_eval mode, you need to specify --ckpt_load_dir")

        # Read the JSON data from file
        qn_uuid_data, context_token_data, qn_token_data = get_json_data(
            FLAGS.json_in_path)

        with tf.Session(config=config) as sess:

            # Load model from ckpt_load_dir
            initialize_model(sess,
                             qa_model,
                             FLAGS.ckpt_load_dir,
                             expect_exists=True)

            # Get a predicted answer for each example in the data
            # Return a mapping answers_dict from uuid to answer
            answers_dict = generate_answers_prob(sess, qa_model, word2id,
                                                 qn_uuid_data,
                                                 context_token_data,
                                                 qn_token_data)

            # Write the uuid->answer mapping to a json file in the root dir
            print "Writing predictions to %s..." % FLAGS.json_out_path
            with io.open(FLAGS.json_out_path, 'w', encoding='utf-8') as f:
                f.write(unicode(json.dumps(answers_dict, ensure_ascii=False)))
                print "Wrote predictions to %s" % FLAGS.json_out_path

    else:
        raise Exception("Unexpected value of FLAGS.mode: %s" % FLAGS.mode)
Example #3
def predict():
    # global_context is shared module-level state; declaring it here (rather than
    # inside the 'C' branch below) keeps the assignment legal and visible to later requests
    global global_context
    # initialize the data dictionary that will be returned from the view
    data = {"success": False}
    print("Predict call")
    # ensure text was properly uploaded to our endpoint
    if flask.request.method == "POST":
        print('Got request')
        print('values', flask.request.values)
        print('get_json', flask.request.get_json())
        # print('flask value',flask.request.values)
        if flask.request.values:
            message = flask.request.values['ticket']
            text_type, new_text = context_parser(message)
            print('Text type', text_type)
            print('Global context', global_context)
            print('New text', new_text)

            if text_type == "GC":
                data["predictions"] = {
                    "answer": "Current context -- " + global_context,
                    "probability": 1
                }
                print('Get context')

            elif text_type == "O":
                data["predictions"] = {"answer": new_text[0], "probability": 1}
                print('Other')

            # 'Q' -> a question to run through the QA model against the stored context
            elif text_type == 'Q':
                print('Question')
                print(new_text)
                new_json = generate_json(version=1,
                                         title="INSOFE",
                                         context=global_context,
                                         questions_list=new_text)

                # with open(fpath, 'w') as outfile:
                #     json.dump(new_json, outfile)

                # Read the JSON data from file
                # qn_uuid_data, context_token_data, qn_token_data = get_json_data(fpath)
                qn_uuid_data, context_token_data, qn_token_data = get_json_data(
                    new_json)
                answers_dict = generate_answers_prob(sess, qa_model, word2id,
                                                     qn_uuid_data,
                                                     context_token_data,
                                                     qn_token_data)
                print(answers_dict, 'ad')
                # print(answers_dict.values(), 'ad')

                # Write the uuid->answer mapping to a json file in the root dir
                # print "Writing predictions to %s..." % FLAGS.json_out_path
                # with io.open(FLAGS.json_out_path, 'w', encoding='utf-8') as f:
                # 	f.write(unicode(json.dumps(answers_dict, ensure_ascii=False)))
                # 	print "Wrote predictions to %s" % FLAGS.json_out_path

                data["predictions"] = {
                    "answer": answers_dict.values()[0][0],
                    "probability": answers_dict.values()[0][1]
                }
                # indicate that the request was a success
                data["success"] = True
            # 'C' -> the request sets a new context for subsequent questions
            elif text_type == 'C':
                global_context = new_text[0]
                data["predictions"] = {
                    "answer": 'Context updated',
                    "probability": 1
                }

    else:
        print('Incorrect request type')

    # return the data dictionary as a JSON response
    return flask.jsonify(data)
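Assuming this view is registered on a route such as /predict and the Flask app listens on port 5000 (both assumptions; only the 'ticket' form field is taken from the code above), a client call might look like:

import requests

# Set the global context first; the exact phrasing that context_parser treats
# as a context update is not shown in the example, so this utterance is illustrative
resp = requests.post("http://localhost:5000/predict",
                     data={"ticket": "context: INSOFE is an applied data science institute"})
print(resp.json())

# Then ask a question against the stored context
resp = requests.post("http://localhost:5000/predict",
                     data={"ticket": "what is INSOFE?"})
print(resp.json())  # {"success": ..., "predictions": {"answer": ..., "probability": ...}}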