def generate(modelPath, hiddenDim, word_to_index, index_to_word, vocabulary_size, minLength=10, sentStart=None):
	# Avoid a mutable default argument; sentStart is an optional list of
	# prefix words (the start token itself is prepended below).
	if sentStart is None:
		sentStart = []
	model = RNNTheano(vocabulary_size, hidden_dim=hiddenDim)
	# losses = train_with_sgd(model, X_train, y_train, nepoch=50)
	# save_model_parameters_theano('./data/trained-model-theano.npz', model)
	load_model_parameters_theano(modelPath, model)

	def generate_sentence(model):
	    # Seed the sentence with the start token plus any caller-supplied
	    # prefix words; unknown prefix words fall back to the unknown token.
	    new_sentence = [word_to_index.get(x, word_to_index[unknown_token])
	                    for x in [sentence_start_token] + sentStart]
	    # Repeat until we get an end token
	    while not new_sentence[-1] == word_to_index[sentence_end_token]:
	        next_word_probs = model.forward_propagation(new_sentence)
	        sampled_word = word_to_index[unknown_token]
	        # We don't want to sample unknown words
	        while sampled_word == word_to_index[unknown_token]:
	            samples = np.random.multinomial(1, next_word_probs[-1])
	            sampled_word = np.argmax(samples)
	        new_sentence.append(sampled_word)
	    sentence_str = [index_to_word[x] for x in new_sentence[1:-1]]
	    return sentence_str
	 
	num_sentences = 60
	senten_min_length = minLength
	 
	for i in range(num_sentences):
	    sent = []
	    # We want long sentences, not sentences with one or two words
	    while len(sent) < senten_min_length:
	        sent = generate_sentence(model)
	    print " ".join(sent)
def train(X_train,y_train,vocabulary_size,hiddenDim,modelFiles):
	model = RNNTheano(vocabulary_size, hidden_dim=hiddenDim)
	t1 = time.time()
	model.sgd_step(X_train[10], y_train[10], _LEARNING_RATE)
	t2 = time.time()
	print "SGD Step time: %f milliseconds" % ((t2 - t1) * 1000.)

	if modelFiles is not None:
	    load_model_parameters_theano(modelFiles, model)

	train_with_sgd(model, X_train, y_train, nepoch=_NEPOCH, learning_rate=_LEARNING_RATE)
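# A minimal usage sketch for the two wrappers above, kept as comments because it
# uses hypothetical call values and assumes X_train, y_train, and the token maps
# are in scope as in the surrounding setup code:
#
#     MODEL_PATH = './data/trained-model-theano.npz'   # hypothetical checkpoint
#     train(X_train, y_train, vocabulary_size, hiddenDim=50, modelFiles=MODEL_PATH)
#     generate(MODEL_PATH, 50, word_to_index, index_to_word, vocabulary_size,
#              minLength=12, sentStart=['i'])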
# Create the training data
print("Creating training data")
X_train = np.asarray([[word_to_index[w] for w in sent[:-1]]
                      for sent in tokenized_sentences])
y_train = np.asarray([[word_to_index[w] for w in sent[1:]]
                      for sent in tokenized_sentences])
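# To make the next-word shift concrete (made-up tokens for illustration):
# for a tokenized sentence [START, 'the', 'dog', 'barks', END],
#   x = [START, 'the', 'dog', 'barks']   (sent[:-1], all but the last token)
#   y = ['the', 'dog', 'barks', END]     (sent[1:],  all but the first token)
# so at every position the training target is simply the following word.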

model = GRUTheano(vocabulary_size, hidden_dim=_HIDDEN_DIM, bptt_truncate=-1)

t1 = time.time()
model.sgd_step(X_train[10], y_train[10], _LEARNING_RATE)
t2 = time.time()
print "SGD Step time: %f milliseconds" % ((t2 - t1) * 1000.)

if _MODEL_FILE is not None:
    ut.load_model_parameters_theano(_MODEL_FILE, model)

for epoch in range(_NEPOCH):
    train_with_sgd(model,
                   X_train,
                   y_train,
                   nepoch=1,
                   learning_rate=_LEARNING_RATE,
                   decay=0.9,
                   callback_every=PRINT_EVERY,
                   callback=sgd_callback)


##################################################################
##    Generating stuff from the model
##################################################################
Example #4
from utils import load_model_parameters_theano, save_model_parameters_theano
# from train import *

model = RNNTheano(vocabulary_size, hidden_dim=50)
# losses = train_with_sgd(model, X_train, y_train, nepoch=50)
# save_model_parameters_theano('./data/trained-model-theano.npz', model)
load_model_parameters_theano('./data/trained-model-theano.npz', model)


def generate_sentence(model):
    # We start the sentence with the start token
    new_sentence = [word_to_index[sentence_start_token]]
    # Repeat until we get an end token
    while not new_sentence[-1] == word_to_index[sentence_end_token]:
        next_word_probs = model.forward_propagation(new_sentence)
        sampled_word = word_to_index[unknown_token]
        # We don't want to sample unknown words
        while sampled_word == word_to_index[unknown_token]:
            samples = np.random.multinomial(1, next_word_probs[-1])
            sampled_word = np.argmax(samples)
        new_sentence.append(sampled_word)
    sentence_str = [index_to_word[x] for x in new_sentence[1:-1]]
    return sentence_str


num_sentences = 10
senten_min_length = 7

for i in range(num_sentences):
    sent = []
    # We want long sentences, not sentences with one or two words
    while len(sent) < senten_min_length:
        sent = generate_sentence(model)
    print " ".join(sent)
Example #5
# Earlier numpy/Theano experiments, kept commented out (the opening triple
# quote was cut off in this excerpt and is restored here):
'''
model = RNNumpy(vocabsize)
losses = trainwithsgd(model, Xtrain[:100], ytrain[:100], nepoch=10, evaluate_loss_after=1)

np.random.seed(10)  # FLAG
model = RNNTheano(vocabsize)
model.sgd_step(Xtrain[10], ytrain[10], 0.005)
'''

from utils import load_model_parameters_theano, save_model_parameters_theano

model = RNNTheano(vocabsize, hiddendim=50)
# losses = train_with_sgd(model, X_train, y_train, nepoch=50)
# save_model_parameters_theano('./data/trained-model-theano.npz', model)
load_model_parameters_theano('/home/ihasdapie/Documents/AI/Data/trained-model-theano.npz', model)
def generate_sentence(model):
	# We start the sentence with the start token
	new_sentence = [wordtoindex[starttoken]]
	# Repeat until we get an end token
	while not new_sentence[-1] == wordtoindex[endtoken]:
		next_word_probs = model.forward_propagation(new_sentence)
		sampled_word = wordtoindex[unknowntoken]
		# We don't want to sample unknown words
		while sampled_word == wordtoindex[unknowntoken]:
			samples = np.random.multinomial(1, next_word_probs[-1])
			sampled_word = np.argmax(samples)
		new_sentence.append(sampled_word)
	sentence_str = [indextoword[x] for x in new_sentence[1:-1]]
	return sentence_str
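# This example stops at the function definition. A sampling loop in the same
# style as the other examples (a sketch, reusing generate_sentence above):
num_sentences = 10
senten_min_length = 7
for i in range(num_sentences):
	sent = []
	# Keep sampling until the sentence is long enough
	while len(sent) < senten_min_length:
		sent = generate_sentence(model)
	print " ".join(sent)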
Example #6
import commands

[X_train, y_train, mask] = load_data(["../pinyin_sample_0"])

model_dir = "model/"

print "training samples: %d" % (len(X_train))
print "pinyin vocabulary: %d" % (len(utils.pinyin_dict))
print "character vocabulary: %d" % (len(utils.character_dict))

# List saved checkpoints via the shell and take the last file in the listing
# (note: plain "ls -l" sorts by name, not by modification time).
(status, output) = commands.getstatusoutput("ls -l " + model_dir + "*.npz")

latest_model = output.split(" ")[-1]

model = utils.load_model_parameters_theano(latest_model)

zhuanming_dict = utils.load_dict("zhuanming_dict")

ins_count = len(X_train)
test_ins_count = int(ins_count * 0.01)
train_ins_count = ins_count - test_ins_count
print "ins_count: %d, test_ins_count: %d, train_ins_count: %d" % (
    ins_count, test_ins_count, train_ins_count)

X_test = X_train[train_ins_count:]
y_test = y_train[train_ins_count:]

correct = 0
total = 0
zhuanming_hit = 0
Example #7
def generate_dic():
    # ... (vocabulary construction omitted in this excerpt) ...
    with open('data/trained-model-theano.dic', 'wb') as f:
        pickle.dump([index_to_word, word_to_index], f)
        print "\nSave index_to_word, word_to_index to 'data/trained-model-theano.dic'"
    return index_to_word, word_to_index
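# The omitted body of generate_dic builds the two lookup tables. A minimal
# sketch in the style of this code, assuming a tokenized_sentences list and
# the vocabulary_size / unknown_token globals used elsewhere in this snippet:
#
#     import itertools, nltk
#     word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
#     vocab = word_freq.most_common(vocabulary_size - 1)
#     index_to_word = [w for (w, _) in vocab] + [unknown_token]
#     word_to_index = dict((w, i) for i, w in enumerate(index_to_word))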

if os.path.exists('./data/trained-model-theano.dic'):
    with open('./data/trained-model-theano.dic', 'rb') as f:
        index_to_word, word_to_index = pickle.load(f)
        print "\nLoad index_to_word, word_to_index from 'data/trained-model-theano.dic'"
else:
    index_to_word, word_to_index = generate_dic()
 
model = RNNTheano(vocabulary_size, hidden_dim=50)
# losses = train_with_sgd(model, X_train, y_train, nepoch=50)
# save_model_parameters_theano('./data/trained-model-theano.npz', model)
load_model_parameters_theano('./data/trained-model-theano.npz', model)

def generate_sentence(model, prefix=""):
    # We start the sentence with the start token
    prefix_words = [sentence_start_token] + nltk.word_tokenize(prefix)
    new_sentence = [word_to_index[w if w in word_to_index else unknown_token] for w in prefix_words]
    # Repeat until we get an end token
    while not new_sentence[-1] == word_to_index[sentence_end_token]:
        next_word_probs = model.forward_propagation(new_sentence)
        sampled_word = word_to_index[unknown_token]
        # We don't want to sample unknown words
        while sampled_word == word_to_index[unknown_token]:
            samples = np.random.multinomial(1, next_word_probs[-1])
            sampled_word = np.argmax(samples)
        new_sentence.append(sampled_word)
    sentence_str = [index_to_word[x] for x in new_sentence[1:-1]]
    return sentence_str
Example #8
import os
from train_theano import vocabulary_size, _HIDDEN_DIM, index_to_word, word_to_index,\
    sentence_end_token, sentence_start_token, unknown_token, model
import numpy as np

_VOCABULARY_SIZE = int(os.environ.get('VOCABULARY_SIZE', '3000'))  # orig 8000
_HIDDEN_DIM = int(os.environ.get('HIDDEN_DIM', '50'))  # orig 50
_LEARNING_RATE = float(os.environ.get('LEARNING_RATE', '0.005'))  # orig 0.005
_NEPOCH = int(os.environ.get('NEPOCH', '10'))  # orig 100
_MODEL_FILE = os.environ.get('MODEL_FILE')

print "a"
# losses = train_with_sgd(model, X_train, y_train, nepoch=50)
# save_model_parameters_theano('./data/trained-model-theano.npz', model)
print "Loading model parameters..."
load_model_parameters_theano(
    './data/rnn-theano-50-3000-2016-10-10-16-49-27.npz', model)


def generate_sentence(model):
    # We start the sentence with the start token
    new_sentence = [word_to_index[sentence_start_token]]
    # Repeat until we get an end token
    while not new_sentence[-1] == word_to_index[sentence_end_token]:
        next_word_probs = model.forward_propagation(new_sentence)
        sampled_word = word_to_index[unknown_token]
        # We don't want to sample unknown words
        while sampled_word == word_to_index[unknown_token]:
            samples = np.random.multinomial(1, next_word_probs[-1])
            sampled_word = np.argmax(samples)
        new_sentence.append(sampled_word)
    sentence_str = [index_to_word[x] for x in new_sentence[1:-1]]
    return sentence_str
Example #9
    # Training callback: report the loss, sample a few sentences, checkpoint
    def sgd_callback(model, num_examples_seen):
        dt = datetime.now().isoformat()
        loss = model.calculate_loss(x_train[:10000], y_train[:10000])
        print("\n%s (%d)" % (dt, num_examples_seen))
        print("--------------------------------------------------")
        print("Loss: %f" % loss)
        generate_sentences_from_scratch(model, 10, index_to_word,
                                        word_to_index)
        save_model_parameters_theano(model, MODEL_OUTPUT_FILE)
        print("\n")
        sys.stdout.flush()

    for epoch in range(NEPOCH):
        train_with_sgd(model,
                       x_train,
                       y_train,
                       learning_rate=LEARNING_RATE,
                       nepoch=1,
                       decay=0.9,
                       callback_every=PRINT_EVERY,
                       callback=sgd_callback)
else:
    model = utils.load_model_parameters_theano("data/pretrained-theano.npz")
    generate_sentences_from_scratch(model, 100, index_to_word, word_to_index)

    print(" == Prefixed sentences == ")
    generate_sentences_by_prefixes(
        model, [["i", "am"], ["robots", "stop"], ["it", "depends"],
                ["i", "am", "going"], ["you", "are", "ridiculous"]],
        index_to_word, word_to_index)
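# generate_sentences_by_prefixes is not defined in this excerpt. A plausible
# sketch (an assumption, mirroring the prefix-seeded generate_sentence from
# Example #7): seed the sentence with each prefix, then sample as usual.
def generate_sentences_by_prefixes_sketch(model, prefixes, index_to_word, word_to_index):
    for prefix in prefixes:
        sent = generate_sentence(model, prefix=" ".join(prefix))
        print(" ".join(sent))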
Example #10
test_label = dio.get_test_gt()

mean_ = np.mean(train_data, axis=0)
rand_data -= mean_

model = rnn.RNNTheano()
if TRAIN:
    EPOCH  = 1
    n = train_data.shape[0] // model.batch_size  # number of full batches
    j = 0
    for it in xrange(EPOCH):
        np.random.shuffle(rand_data)
        for i in xrange(n):
            dt = rand_data[i*model.batch_size:(i+1)*model.batch_size]
            rt = model.sgd_step(dt)
            timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
            print("%s  iteration: %6d, loss: %f" % (timestamp, j, rt[0]))
            j += 1
    if not os.path.exists('models'):
        os.makedirs("models")
    utils.save_model_parameters_theano('models/models.npz', model)
utils.load_model_parameters_theano('models/models.npz', model)
d = train_data - mean_
p = model.predict(d)
B = utils.num2bit(p)
test_data -= mean_
query = model.predict(test_data)
query_b = utils.num2bit(query)
print "start calculate map ..."
print utils.cat_map(B,train_label,query_b,test_label)
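# utils.num2bit is not shown in this excerpt. For hashing pipelines like this
# one it typically thresholds the real-valued codes into binary bits; a
# plausible sketch (an assumption, not the actual utils implementation):
def num2bit_sketch(codes, threshold=0.0):
    # Binarize network outputs into {0, 1} hash bits by thresholding.
    return (codes > threshold).astype(np.int8)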




line_start_token = "LINE_START"
line_end_token = "LINE_END"
dictFile = 'dictFile.txt'
with open(dictFile) as f:
    dicts = []
    for line in f:
        line = ast.literal_eval(line)
        dicts.append(line)
char_to_code_dict, code_to_char_dict = dicts
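# The loop above expects dictFile.txt to hold one Python dict literal per line:
# first the char-to-code map, then its inverse. A made-up two-line example:
#
#     {'a': 0, 'b': 1, 'LINE_START': 80, 'LINE_END': 81}
#     {0: 'a', 1: 'b', 80: 'LINE_START', 81: 'LINE_END'}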

load_model_parameters_theano('saved_model_parameters/{0}'.format(DATAFILE), MODEL)

def one_hot(x):
    # 82 appears to be the character vocabulary size used by this model
    oneHot = np.zeros(82)
    oneHot[x] = 1
    return oneHot
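# For example (given the 82-entry vocabulary assumed above), one_hot(3) returns
# a length-82 vector that is all zeros except for a 1 at index 3:
#
#     >>> v = one_hot(3)
#     >>> v.shape, int(v.sum()), int(np.argmax(v))
#     ((82,), 1, 3)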

e = np.full(82, math.e)  # vector filled with Euler's number e
def generate_sentence(model):
    # We start the sentence with the start token
    new_sentence = [one_hot(char_to_code_dict[line_start_token])]
    # Repeat until we get an end token
    sampled_letter = None
    counter = 0
    while sampled_letter != char_to_code_dict[line_end_token]:
        counter += 1
Example #12
#from gru_theano import *
from lstm_theano import *
import sys


# In[4]:

# Load data (this may take a few minutes)
VOCABULARY_SIZE = 8000
X_train, y_train, word_to_index, index_to_word = load_data("data/lyrics.txt", VOCABULARY_SIZE)


# In[21]:

# Load parameters of pre-trained model
model = load_model_parameters_theano('./data/LSTM-2016-04-12-05-40-8000-48-128.dat.npz')


# In[2]:

# Build your own model (not recommended unless you have a lot of time!)

LEARNING_RATE = 1e-3
NEPOCH = 20
HIDDEN_DIM = 128

# model = LSTMTheano(VOCABULARY_SIZE, HIDDEN_DIM)

# t1 = time.time()
# model.sgd_step(X_train[0], y_train[0], LEARNING_RATE)
# t2 = time.time()
Example #13
# -*- coding:UTF-8 -*-
import sys
import cPickle
import numpy as np
from utils import load_model_parameters_theano

fn = 'word2idx.pkl'
with open(fn, 'r') as f:
    idx2word = cPickle.load(f)
fn1 = 'label2idx.pkl'
with open(fn1, 'r') as f1:
    idx2label = cPickle.load(f1)
model = load_model_parameters_theano('./data/model.npz')

def test():
    # Map each command-line word to its index; unknown words get the last index.
    words = sys.argv[1:]
    b = []
    for x in words:
        if x in idx2word.values():
            b.append(idx2word.values().index(x))
        else:
            b.append(len(idx2word) - 1)
    print b
    y = model.predict(b)
    label = []
    for x in y:
        # Sample a label from the predicted distribution for each position.
        samples1 = np.random.multinomial(1, x)
        sampled_word1 = np.argmax(samples1)
        label.append(idx2label[sampled_word1])
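# Usage sketch (hypothetical invocation; the actual script name is not shown in
# this snippet): pass the words to tag as command-line arguments, e.g.
#
#     $ python tagger.py show me flights to boston
#
# test() maps each word to an index, runs model.predict, and samples one label
# per word from the predicted distribution.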