def evaluate_sentence(sentence, vocabulary):
    """
    Translates a string to its equivalent in the integer vocabulary and feeds
    it to the network. Logs the result and returns the predicted sentiment.
    """
    x_to_eval = data_helpers.string_to_int(sentence, vocabulary, maxLengthInX)
    # Raw (unnormalized) network scores; dropout is disabled at inference time.
    unnorm_result = sess.run(network_out,
                             feed_dict={
                                 data_in: x_to_eval,
                                 dropout_keep_prob: 1.0
                             })

    # Map the positive-class score to a three-way label, with a neutral band
    # between 0.4 and 0.6.
    network_sentiment = "NEU"
    if unnorm_result[0].item(1) > 0.6:
        network_sentiment = "POS"
    elif unnorm_result[0].item(1) < 0.4:
        network_sentiment = "NEG"

    log("Custom input evaluation:", network_sentiment)
    log("Actual output:", str(unnorm_result[0]))
    return network_sentiment
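
# Usage sketch for the thresholded variant above (an assumption-laden example,
# not part of the original script): it presumes the surrounding code has
# already restored `sess`, built `network_out`, `data_in` and
# `dropout_keep_prob`, and loaded `vocabulary` and `maxLengthInX` from the
# training run.
for example in ["great movie, loved it", "worst film i have ever seen"]:
    print(example, "->", evaluate_sentence(example, vocabulary))  # POS/NEG/NEU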
def evaluate_sentence(sentence, vocabulary):
    """
    Cleans a string, translates it to its integer-vocabulary equivalent and
    feeds it to the network. Returns the predicted class index.
    """
    x_to_eval = string_to_int(clean_str(sentence), vocabulary,
                              max(len(_) for _ in x))
    # Argmax over the output scores gives the predicted class (0 = NEG, 1 = POS).
    result = sess.run(tf.argmax(network_out, 1),
                      feed_dict={data_in: x_to_eval,
                                 dropout_keep_prob: 1.0})
    return result[0]
def evaluate_sentence(sentence, vocabulary):
    """
    Translates a string to its equivalent in the integer vocabulary and feeds
    it to the network. Outputs the result to stdout.
    """
    x_to_eval = data_helpers.string_to_int(sentence, vocabulary,
                                           max(len(i) for i in x))
    # Fetch the predicted class and the raw scores.
    result = sess.run(tf.argmax(network_out, 1),
                      feed_dict={data_in: x_to_eval, dropout_keep_prob: 1.0})
    unnorm_result = sess.run(network_out,
                             feed_dict={data_in: x_to_eval,
                                        dropout_keep_prob: 1.0})
    network_sentiment = "POS" if result[0] == 1 else "NEG"
    log("Custom input evaluation:", network_sentiment)
    log("Actual output:", str(unnorm_result[0]))
Example #4
def evaluate_sentence(sentence, vocabulary):
    """
    Translates a string to its equivalent in the integer vocabulary and feeds it
    to the network.
    Outputs result to stdout.
    """
    x_to_eval = string_to_int(sentence, vocabulary, max(len(_) for _ in x))
    result = sess.run(tf.argmax(network_out, 1),
                      feed_dict={data_in: x_to_eval,
                                 dropout_keep_prob: 1.0})
    unnorm_result = sess.run(network_out, feed_dict={data_in: x_to_eval,
                                                     dropout_keep_prob: 1.0})
    network_sentiment = 'POS' if result[0] == 1 else 'NEG'
    log('Custom input evaluation:', network_sentiment)
    log('Actual output:', str(unnorm_result[0]))
Example #5
def evaluate_sentence(sentence):
    """
    Feeds an already-cleaned tweet record to the network and returns the
    predicted class index.
    """
    x_to_eval = string_to_int(sentence["text_cleaned"], vocabulary,
                              max(len(_) for _ in x))
    result = sess.run(tf.argmax(network_out, 1),
                      feed_dict={
                          data_in: x_to_eval,
                          dropout_keep_prob: 1.0
                      })
    # sess.run returns an array here, never None, so no error branch is needed.
    return result[0]
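
# Usage sketch for the dict-based variant above (hypothetical record: the
# {"text_cleaned": ...} shape is inferred from the signature, and the
# preprocessing that produces it is not shown in this snippet):
tweet = {"text_cleaned": "just landed, what a view"}
predicted_class = evaluate_sentence(tweet)  # class index, e.g. 0 = NEG, 1 = POS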
def evaluate_sentence(tweet_id, sentence, vocabulary):
    """
    Translates a string to its equivalent in the integer vocabulary and feeds
    it to the network. Logs "<id>,<prediction>" to stdout.
    """
    x_to_eval = string_to_int(sentence, vocabulary, max(len(_) for _ in x))
    result = sess.run(tf.argmax(network_out, 1),
                      feed_dict={
                          data_in: x_to_eval,
                          dropout_keep_prob: 1.0
                      })
    # Map the class index to the submission labels 1 / -1.
    network_sentiment = '1' if result[0] == 1 else '-1'
    log(tweet_id + "," + network_sentiment)
# Note: the head of this call was truncated in extraction; tf.train.write_graph
# is the most likely original (the trailing arguments match its signature), and
# the first argument below is an assumption.
tf.train.write_graph(sess.graph_def,
                     RUN_DIR,
                     'minimal_graph.txt',
                     as_text=True)

if FLAGS.submit:
    log('Loading submit data')
    with open("../twitter-datasets/test_data.txt", "r") as f:
        submit_examples = [s.strip() for s in f.readlines()]
    # Each line is "<id>,<tweet text>"; split on the first comma only.
    splitter = [s.split(',', 1) for s in submit_examples]
    sentences = [s[1] for s in splitter]
    ids = [s[0] for s in splitter]
    # Evaluate all tweets in a single batched run instead of calling
    # evaluate_sentence(tweet_id, tweet_data, vocabulary) once per tweet.
    max_len = max(len(_) for _ in x)
    x_to_eval = [
        string_to_int(sentence, vocabulary, max_len)[0]
        for sentence in sentences
    ]
    log('Generating submission data')
    result = sess.run(tf.argmax(network_out, 1),
                      feed_dict={
                          data_in: x_to_eval,
                          dropout_keep_prob: 1.0
                      })
    # Map class indices to the 1 / -1 labels expected by the submission format.
    result = ['1' if r == 1 else '-1' for r in result]
    log('Saving submission')
    with open("../twitter-datasets/tscnnsubmission.csv", "w") as f:
        f.write("Id,Prediction\n")
        for i, p in enumerate(result):
            f.write(str(ids[i]) + "," + str(p) + "\n")
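
# Optional sanity check (a sketch, not part of the original script): read the
# submission file back and print the header plus the first few rows.
with open("../twitter-datasets/tscnnsubmission.csv") as f:
    for _, line in zip(range(4), f):
        print(line.rstrip())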