Example #1
def log_predictions(sentences, nn_model, w2v_model, index_to_token, no_predictions, stats_info=None):
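    # write one predicted response per line to PREDICTIONS_FILE_<no_predictions>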
    with codecs.open(PREDICTIONS_FILE+'_'+str(no_predictions), 'w', 'utf-8') as predictions:
        for sent in sentences:
            prediction = predict_sentence(sent, nn_model, w2v_model, index_to_token)
            # _logger.info('[%s] -> [%s]' % (sent, prediction))
            print "WRITING PREDICTIONS"
            predictions.write(prediction + '\n')
Example #2
def predict():
    # preprocess the dialog corpus and build the index_to_token mapping for its vocabulary
    processed_dialog_lines, index_to_token = \
        get_processed_dialog_lines_and_index_to_token(CORPUS_PATH, PROCESSED_CORPUS_PATH, TOKEN_INDEX_PATH)

    # duplicate the iterator so the lines can be consumed twice (once for w2v, once for the NN)
    dialog_lines_for_w2v, dialog_lines_for_nn = tee(processed_dialog_lines)
    _logger.info('-----')

    # use the gensim implementation of word2vec instead of keras embeddings for extra flexibility
    w2v_model = w2v.get_dialogs_model(W2V_PARAMS, dialog_lines_for_w2v)
    _logger.info('-----')

    nn_model = get_nn_model(token_dict_size=len(index_to_token))

    while True:
        input_sentence = raw_input('> ')
        predict_sentence(input_sentence, nn_model, w2v_model, index_to_token)
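Examples #1 and #2 are Python 2 code (note the print statement and raw_input). Under Python 3, the interactive loop at the end of Example #2 would be written as below; the surrounding setup is unchanged.

    while True:
        input_sentence = input('> ')  # input() replaces Python 2's raw_input()
        predict_sentence(input_sentence, nn_model, w2v_model, index_to_token)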
Example #3
def log_predictions(sentences,
                    nn_model,
                    w2v_model,
                    index_to_token,
                    stats_info=None):
    for sent in sentences:
        prediction = predict_sentence(sent, nn_model, w2v_model,
                                      index_to_token)
        _logger.info('[%s] -> [%s]' % (sent, prediction))
Example #4
def log_predictions(sentences, nn_model, w2v_model, index_to_token, stats_info=None):
    for sent in sentences:
        prediction = predict_sentence(sent, nn_model, w2v_model, index_to_token)
        _logger.info("[%s] -> [%s]" % (sent, prediction))