Example #1
import numpy as np

# Character_Map and RNN come from the project's own modules (not shown here)
def load_shit():
	text_test = './../texts/melville.txt'
	char_map_obj = Character_Map(text_test,'mapping.dat',overwrite=True, break_line=None)
	unique_char = char_map_obj.unique_char
	char_map_obj.k_map()
	x, y, shared_x, shared_y = char_map_obj.gen_x_and_y(filename=None)
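	# shared_x, shared_y are presumably Theano shared variables (cf. the
	# get_value() calls on the shared data sets in Example #5)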
	# print(shared_x, shared_y.get_value().shape[0])
	nh = 100  # number of hidden units
	nx = len(char_map_obj.unique_char)  # input size = vocabulary size
	ny = nx  # output size equals input size (next-character prediction)

	trainer = RNN(nh,nx,ny)
	trainer.load_param('param_6-10_17:52/param_epoch199.dat')

	f = trainer.compile_gen_sentence()
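	# f takes a seed x vector and returns the y vectors for each subsequent
	# position (cf. the commented-out docstring in Example #4)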

	# decode the network's predictions for a slice of the data back into text
	for xi in x[100:150]:
		y_guess = f(xi[0])
		y_argmax = [np.argmax(yg) for yg in y_guess]  # yg, not y: in Python 2 the comprehension variable would clobber the outer y
		char_y = [unique_char[int(yi)] for yi in y_argmax]
		print(char_y)
		print(''.join(char_y))


	return trainer, char_map_obj, x, y, shared_x, shared_y
Example #2
def train_NN(mu, n_epoch, mini_batch):
	"""
	Train the neural net 
	"""
	text_test = './../texts/melville.txt'
	char_map_obj = Character_Map(text_test,'mapping.dat',overwrite=True, break_line=None)
	char_map_obj.k_map()
	x, y, shared_x, shared_y = char_map_obj.gen_x_and_y(filename=None)

	nh = 100
	nx = len(char_map_obj.unique_char)  # vocabulary size
	ny = nx  # predict the next character, so output size matches input size

	trainer = RNNClass(nh,nx,ny)
	trainer.train_index((shared_x,shared_y),mu,n_epoch,mini_batch)
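
The train_index method itself is not shown in these examples, but Example #5 below builds the same pieces by hand. A minimal sketch of the update it presumably compiles, reusing the gparams/updates/givens pattern from Example #5 (the function name sgd_train_fn and the model argument are hypothetical):

import theano
import theano.tensor as T

def sgd_train_fn(model, shared_x, shared_y, x, y, mu, mini_batch):
	# hypothetical sketch; mirrors the explicit SGD setup in Example #5
	index = T.lscalar()
	cost = model.neg_log_likelihood(y)
	gparams = [T.grad(cost, p) for p in model.params]
	updates = [(p, p - mu * g) for p, g in zip(model.params, gparams)]
	return theano.function(
		inputs=[index], outputs=cost, updates=updates,
		givens={x: shared_x[index * mini_batch:(index + 1) * mini_batch],
				y: shared_y[index * mini_batch:(index + 1) * mini_batch]})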
Example #3
import time

import numpy as np
import theano
import theano.tensor as T

def main_test():
    rng = np.random.RandomState(1234)
    filename = './../texts/melville.txt'
    foo = Character_Map(filename,'mapping.dat',overwrite=True)
    # print(len(foo.mapping))
    map_matrix = foo.k_map()
    train, valid, test = foo.gen_train_valid_test(filename=None)
    # print(train[1].get_value().dtype)
    # print(train[1].get_value()[:10].shape)
    x = T.tensor3('x')
    y = T.imatrix('y')
    # x = T.matrix('x')
    # rnnlayer = RNNlayer(x,77,77)
    # f = theano.function(inputs=[x], outputs=rnnlayer.output)
    # foo = f(train[0].get_value()[:10])
    # print(foo.shape)
    rnn = RNN(x,[77],rng=rng)  # 77 = number of unique characters in Moby Dick
    ftest = theano.function(inputs=[x], outputs=rnn.p_y_given_x)
    # print(ftest(train[0].get_value()[:10]).shape)
    print("Compiling training and testing functions...")
    t0 = time.time()
    ftrain = theano.function(inputs=[x,y],outputs=rnn.neg_log_likelihood(y))
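    # note: no updates are supplied here, so ftrain only evaluates the loss;
    # it does not actually change any parameters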
    # ftest = theano.function(inputs=[x,y], outputs=rnn.error(y))
    # ftest1 = theano.function(inputs=[x,y],outputs=[rnn.y_pred, y])
    print("Completed compiling functions. Took {:.2f} seconds".format(time.time() - t0))
    for i in xrange(2):
        print(ftrain(train[0].get_value()[i*10:(i+1)*10], train[1].get_value()[i*10:(i+1)*10]))
Example #4
	# 	and returns y vectors for each of the subsequent positions. 
	# 	"""
	# 	x = T.vector('x')
	# 	y = self.gen_random_sentence(x)

	# 	f = theano.function([x],y)

	# 	return f 



if __name__ == '__main__':

	text_test = './../texts/melville.txt'
	char_map_obj = Character_Map(text_test,'mapping.dat',overwrite=True, break_line=None)
	char_map_obj.k_map()
	x, y, shared_x, shared_y = char_map_obj.gen_x_and_y(filename=None)
	# print(shared_x, shared_y.get_value().shape[0])
	nh = 100
	nx = len(char_map_obj.unique_char)
	ny = nx

	trainer = RNNClass(nh,nx,ny)
	# jobs = []
	# for i in xrange(2):
	# 	p = multiprocessing.Process(target=trainer.train, args=((shared_x,shared_y),0.03,1000,10,))
	# 	jobs.append(p)
	# 	p.start()
	# trainer.load_param('param_epoch95.dat')
	trainer.train_index(training_data=(shared_x,shared_y),
						learning_rate=0.01,
Example #5
import time

import numpy as np
import theano
import theano.tensor as T

def test_train_RNN(**kwargs):
    """
    Train an RNN with plain SGD.

    kwargs: filename, n_hidden, n_epochs, minibatch_size, lr
    """
    filename = kwargs.get('filename','./../texts/melville.txt')
    n_hidden = kwargs.get('n_hidden',77)
    n_epochs = kwargs.get('n_epochs',100)
    minibatch_size = kwargs.get('minibatch_size',100)
    lr = kwargs.get('lr',0.01)

    charmap = Character_Map(filename,'mapping.dat',overwrite=True)
    charmap.k_map()
    train, valid, test = charmap.gen_train_valid_test(filename=None)

    train_set_x, train_set_y = train
    valid_set_x, valid_set_y = valid
    test_set_x, test_set_y = test

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // minibatch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // minibatch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // minibatch_size
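    # integer division: samples that don't fill a complete minibatch are dropped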
    # print(n_train_batches, n_valid_batches, n_test_batches)
    print("Train size: {}, valid size {}, test size {}".format(train_set_x.get_value(borrow=True).shape[0],
                                                                valid_set_x.get_value(borrow=True).shape[0],
                                                                test_set_x.get_value(borrow=True).shape[0]))

    index = T.lscalar()
    x = T.tensor3('x')
    y = T.imatrix('y')
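    # x: minibatch of character sequences (3-D, presumably one-hot on the last axis)
    # y: matrix of integer character targets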

    rng = np.random.RandomState(1234)

    rnn = RNN(x,[n_hidden])  # TODO: take differing input and output sizes into account

    cost = rnn.neg_log_likelihood(y)
    print("Compiling training, testing and validating functions...")
    t0 = time.time()
    test_model = theano.function(
            inputs=[index],
            outputs=rnn.error(y),
            givens={
                x: test_set_x[index * minibatch_size:(index + 1) * minibatch_size],
                y: test_set_y[index * minibatch_size:(index + 1) * minibatch_size]
            }
        )

    valid_model = theano.function(
            inputs=[index],
            outputs=rnn.error(y),
            givens={
                x: valid_set_x[index * minibatch_size:(index + 1) * minibatch_size],
                y: valid_set_y[index * minibatch_size:(index + 1) * minibatch_size]
            }
        )

    gparams = [T.grad(cost, param) for param in rnn.params]

    updates = [
        (param, param-lr*gparam) for param, gparam in zip(rnn.params,gparams)
    ]
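    # plain SGD: param <- param - lr * grad(cost, param)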

    train_model = theano.function(
            inputs=[index],
            outputs=cost,
            updates=updates,
            givens={
                x: train_set_x[index * minibatch_size:(index + 1) * minibatch_size],
                y: train_set_y[index * minibatch_size:(index + 1) * minibatch_size]
            }
        )
    print("Completed compiling functions. Took {:.2f} seconds".format(time.time() - t0))
    print("Starting training...")
    valid_freq = 4  # validate every 4 parameter updates
    best_valid = np.inf
    for epoch in xrange(n_epochs):
        for minibatch_index in xrange(n_train_batches-1):  # note: the last minibatch is skipped
            mean_cost = train_model(minibatch_index)
            iteration_number = epoch*n_train_batches + minibatch_index
            if iteration_number % valid_freq == 0:
                valid_losses = np.array([valid_model(i) for i in xrange(n_valid_batches)])
                # print(valid_losses)
                mean_valid = np.mean(valid_losses)
                print("Minibatch number: {}\nEpoch number: {}\nValidation Error {}".format(minibatch_index,epoch,mean_valid))
                if mean_valid < best_valid:
                    best_valid = mean_valid
                    print("Best Validation so far: {}".format(best_valid))
            else:
                print("Number of iterations: {}, cost {}".format(iteration_number,mean_cost))

    print("Done optimizing")
Example #6
def load_dataset(filename):
    foo = Character_Map(filename,'mapping.dat',overwrite=True)
    # print(len(foo.mapping))
    map_matrix = foo.k_map()  # presumably builds the 1-of-k character map (return value unused here)
    return foo.gen_train_valid_test(filename=None)
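
Usage mirrors the unpacking in Example #5:

train, valid, test = load_dataset('./../texts/melville.txt')
train_set_x, train_set_y = train  # Theano shared variables
print(train_set_x.get_value(borrow=True).shape)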