import collections
import os
import re
import socket
import sys
from time import sleep

import tensorflow as tf

import data_utils

# FLAGS, TEST_DATASET_PATH, cfg (connection settings), create_model,
# get_predicted_sentence / _get_predicted_sentence, send_chat and
# plot_with_labels are assumed to be defined elsewhere in this module.


def predict():
    def _get_test_dataset():
        with open(TEST_DATASET_PATH) as test_fh:
            test_sentences = [s.strip() for s in test_fh.readlines()]
        return test_sentences

    results_filename = '_'.join(
        ['results', str(FLAGS.num_layers), str(FLAGS.size), str(FLAGS.vocab_size)])
    results_path = os.path.join(FLAGS.results_dir, results_filename)

    with tf.Session() as sess, open(results_path, 'w') as results_fh:
        # Create model and load parameters.
        model = create_model(sess, forward_only=True)
        model.batch_size = 1  # We decode one sentence at a time.

        # Load vocabularies.
        vocab_path = os.path.join(FLAGS.data_dir, "vocab%d.in" % FLAGS.vocab_size)
        vocab, rev_vocab = data_utils.initialize_vocabulary(vocab_path)

        test_dataset = _get_test_dataset()

        for sentence in test_dataset:
            # Decode the input sentence and log the result.
            predicted_sentence = get_predicted_sentence(sentence, vocab, rev_vocab,
                                                        model, sess)
            print(sentence, ' -> ', predicted_sentence)
            results_fh.write(predicted_sentence + '\n')
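# predict() above relies on a get_predicted_sentence() helper that is not
# defined in this file. The sketch below shows one plausible shape for it,
# modeled on the greedy decoding loop of TensorFlow's old translate.py
# example; the bucket layout and the model.get_batch()/model.step() API are
# assumptions about the surrounding codebase, not the original implementation.
import numpy as np


def _get_predicted_sentence_sketch(sentence, vocab, rev_vocab, model, sess):
    # Convert the raw sentence to token ids and pick the smallest bucket that fits.
    token_ids = data_utils.sentence_to_token_ids(sentence, vocab)
    bucket_id = min(b for b in range(len(model.buckets))
                    if model.buckets[b][0] > len(token_ids))
    # Feed the sentence as a batch of one and run a single forward step.
    encoder_inputs, decoder_inputs, target_weights = model.get_batch(
        {bucket_id: [(token_ids, [])]}, bucket_id)
    _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
                                     target_weights, bucket_id, True)
    # Greedy decoding: take the argmax token at every output position.
    outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
    # Truncate at the end-of-sentence marker, if present.
    if data_utils.EOS_ID in outputs:
        outputs = outputs[:outputs.index(data_utils.EOS_ID)]
    return " ".join(rev_vocab[t] for t in outputs)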
def tsne():
    with tf.Session() as sess:
        from sklearn import manifold

        # Load vocabularies.
        vocab_path = os.path.join(FLAGS.data_dir, "vocab%d.in" % FLAGS.vocab_size)
        vocab, rev_vocab = data_utils.initialize_vocabulary(vocab_path)

        # Visualize the 500 most frequent vocabulary entries.
        rows = [i for i in range(500)]
        labels = [rev_vocab[i] for i in range(500)]

        tsne = manifold.TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)

        # Create model and load parameters.
        model = create_model(sess, forward_only=True)
        model.batch_size = 1  # We decode one sentence at a time.

        # Fetch the decoder embedding matrix and keep only the selected rows.
        embeddings = tf.get_variable(
            "embedding_attention_seq2seq/embedding_attention_decoder/embedding")
        reduced_embeddings = tf.gather(embeddings, rows)
        num_embeddings = reduced_embeddings.eval()
        print(num_embeddings.shape)

        # Project the embeddings to 2-D with t-SNE and plot them.
        low_dim_embs = tsne.fit_transform(num_embeddings)
        plot_with_labels(low_dim_embs, labels)
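# tsne() above calls plot_with_labels(), which is not defined in this file.
# A minimal sketch using matplotlib, in the style of TensorFlow's word2vec
# tutorial; the figure size and output filename are assumptions.
def _plot_with_labels_sketch(low_dim_embs, labels, filename='tsne.png'):
    import matplotlib.pyplot as plt
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    plt.figure(figsize=(18, 18))
    for i, label in enumerate(labels):
        x, y = low_dim_embs[i, :]
        plt.scatter(x, y)
        # Offset each annotation slightly so it does not cover its point.
        plt.annotate(label, xy=(x, y), xytext=(5, 2),
                     textcoords='offset points', ha='right', va='bottom')
    plt.savefig(filename)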
# Variant of predict() that also scores predictions against paired reference
# lines: even-numbered lines are inputs, odd-numbered lines the expected output.
def predict():
    def _get_test_dataset():
        with open(TEST_DATASET_PATH) as test_fh:
            test_sentences = [s.strip() for s in test_fh.readlines()]
        return test_sentences

    results_filename = '_'.join(
        ['results', str(FLAGS.num_layers), str(FLAGS.size), str(FLAGS.vocab_size)])
    results_path = os.path.join(FLAGS.results_dir, results_filename)

    with tf.Session() as sess, open(results_path, 'w') as results_fh:
        # Create model and load parameters.
        model = create_model(sess, forward_only=True)
        model.batch_size = 1  # We decode one sentence at a time.

        # Load vocabularies.
        vocab_path = os.path.join(FLAGS.data_dir, "vocab%d.in" % FLAGS.vocab_size)
        vocab, rev_vocab = data_utils.initialize_vocabulary(vocab_path)

        test_dataset = _get_test_dataset()

        i = 0
        allright = 0
        for sentence in test_dataset:
            if i % 2 == 0:
                # Even lines: decode the input and wrap the prediction with the
                # input's first and second comma-separated fields.
                predicted_sentence = get_predicted_sentence(
                    sentence, vocab, rev_vocab, model, sess)
                item = sentence.strip().split(',')
                predicted_sentence = item[0] + ',' + predicted_sentence + ',' + item[1]
                print(sentence, ' -> ', predicted_sentence)
            if i % 2 == 1:
                # Odd lines: rebuild the reference in the same format and
                # compare it against the previous prediction.
                item = predicted_sentence.strip().split(',')
                sentence = item[0] + ',' + sentence + ',' + item[-1]
                if sentence == predicted_sentence:
                    allright += 1
                    print('^ is allright' + '\n')
                results_fh.write(sentence + '\n' + predicted_sentence + '\n')
            i = i + 1

        print('traj=%d, allright=%d, accuracy=%f' %
              (i // 2, allright, allright * 1.0 / (i * 1.0 / 2)))
        results_fh.write('traj=%d,allright=%d,accuracy=%f' %
                         (i // 2, allright, allright * 1.0 / (i * 1.0 / 2)))
# Variant of chat() that replies in a Twitch IRC channel instead of stdin.
def chat():
    with tf.Session() as sess:
        # Create model and load parameters.
        model = create_model(sess, forward_only=True)
        model.batch_size = 1  # We decode one sentence at a time.

        # Load vocabularies.
        vocab_path = os.path.join(FLAGS.data_dir, "vocab%d.in" % FLAGS.vocab_size)
        vocab, rev_vocab = data_utils.initialize_vocabulary(vocab_path)

        # Keep the last few channel messages as decoding context.
        past = collections.deque(maxlen=3)

        # Connect to the Twitch IRC server and join the channel.
        s = socket.socket()
        s.connect((cfg.HOST, cfg.PORT))
        s.send("PASS {}\r\n".format(cfg.PASS).encode("utf-8"))
        s.send("NICK {}\r\n".format(cfg.NICK).encode("utf-8"))
        s.send("JOIN {}\r\n".format(cfg.CHAN).encode("utf-8"))
        s.setblocking(1)

        while True:
            try:
                response = s.recv(1024).decode("utf-8")
                if response == "PING :tmi.twitch.tv\r\n":
                    # Answer keep-alive pings so the server does not drop us.
                    s.send("PONG :tmi.twitch.tv\r\n".encode("utf-8"))
                else:
                    # Pull the message bodies out of PRIVMSG lines.
                    for msg in re.findall(r'PRIVMSG.+?(?=\:)\:(.+?(?=\r))', response):
                        past.extendleft([msg])
            except Exception:
                # Ignore transient socket/decoding errors and keep chatting.
                pass

            # Join the recent messages into a single context sentence.
            sentence = u' '.join(past)
            predicted_sentence = _get_predicted_sentence(sentence, vocab, rev_vocab,
                                                         model, sess)
            # Retry a few times if the reply is empty or contains unknown tokens.
            i = 0
            while ("_UNK" in predicted_sentence or predicted_sentence == "") and i < 10:
                i += 1
                predicted_sentence = _get_predicted_sentence(sentence, vocab, rev_vocab,
                                                             model, sess)

            print(predicted_sentence)
            send_chat(s, predicted_sentence)
            sys.stdout.flush()
            past.extendleft([predicted_sentence])
            sleep(2)
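# chat() above calls send_chat(), which is not defined in this file. On the
# Twitch IRC protocol a reply is a PRIVMSG to the joined channel, so a minimal
# sketch could look like this (assuming cfg.CHAN is the channel joined above):
def _send_chat_sketch(sock, msg):
    sock.send("PRIVMSG {} :{}\r\n".format(cfg.CHAN, msg).encode("utf-8"))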
# Interactive variant of chat() that decodes sentences typed on stdin.
def chat():
    with tf.Session() as sess:
        # Create model and load parameters.
        model = create_model(sess, forward_only=True)
        model.batch_size = 1  # We decode one sentence at a time.

        # Load vocabularies.
        vocab_path = os.path.join(FLAGS.data_dir, "vocab%d.in" % FLAGS.vocab_size)
        vocab, rev_vocab = data_utils.initialize_vocabulary(vocab_path)

        # Decode from standard input.
        sys.stdout.write("> ")
        sys.stdout.flush()
        sentence = sys.stdin.readline()
        while sentence:
            predicted_sentence = get_predicted_sentence(sentence, vocab, rev_vocab,
                                                        model, sess)
            print(predicted_sentence)
            sys.stdout.write("> ")
            sys.stdout.flush()
            sentence = sys.stdin.readline()
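# Every snippet here calls create_model(sess, forward_only=True) to build the
# network and restore its weights, but the helper is not defined in this file.
# A sketch of the usual pattern from the old TF seq2seq examples; the
# seq2seq_model module, the _buckets list and the extra FLAGS referenced below
# are assumptions about the surrounding codebase.
def _create_model_sketch(session, forward_only):
    model = seq2seq_model.Seq2SeqModel(
        FLAGS.vocab_size, FLAGS.vocab_size, _buckets, FLAGS.size,
        FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,
        FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,
        forward_only=forward_only)
    ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
    if ckpt and ckpt.model_checkpoint_path:
        # Restore a previously trained model from its latest checkpoint.
        model.saver.restore(session, ckpt.model_checkpoint_path)
    else:
        # Otherwise start from freshly initialized parameters.
        session.run(tf.global_variables_initializer())
    return model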
print('Socket bind complete')
s.listen(10)
print('Socket now listening')

# Now keep talking with the client.
with tf.Session() as sess:
    # Create model and load parameters.
    model = create_model(sess, forward_only=True)
    model.batch_size = 1  # We decode one sentence at a time.

    # Load vocabularies.
    vocab_path = os.path.join(FLAGS.data_dir, "vocab%d.in" % FLAGS.vocab_size)
    vocab, rev_vocab = data_utils.initialize_vocabulary(vocab_path)

    # Wait to accept a connection - blocking call.
    conn, addr = s.accept()
    print('Connected with ' + addr[0] + ':' + str(addr[1]))

    while True:
        data, address = conn.recvfrom(1024)
        if not data:
            continue
        sentence = data.decode("utf-8", "ignore")
        print(sentence)
        predicted_sentence = get_predicted_sentence(sentence, vocab, rev_vocab,
                                                    model, sess)
        print(predicted_sentence)
        conn.sendall(predicted_sentence.encode("utf-8"))
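# A minimal client for the socket server above, useful for manual testing; the
# host and port are placeholders and must match the address the server socket
# s was bound to before the 'Socket bind complete' message.
def _client_example(host='127.0.0.1', port=8888):
    c = socket.socket()
    c.connect((host, port))
    c.sendall("hello there".encode("utf-8"))
    # The server replies with the model's predicted sentence.
    print(c.recv(1024).decode("utf-8"))
    c.close()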