maxlen=100  # Sequences longer than this get ignored.
batch_size=16  # The batch size during training.
valid_batch_size=64  # The batch size used for validation/test set.
dataset='imdb'

noise_std=0.
use_dropout=True  # If False, slightly faster, but worse test error.
                  # This frequently needs a bigger model.
reload_model=None  # Path to a saved model we want to start from.
test_size=-1  # If >0, we keep only this number of test examples.

model_options = locals().copy()
# print "model options", model_options


X, Y, X_test, Y_test = mdn_lstm.load_data()


params = mdn_lstm.init_params(model_options)
tparams = mdn_lstm.init_tparams(params)
(use_noise, x, mask,
     y, f_pred_prob, f_pred, cost) = mdn_lstm.build_model(tparams, model_options)

f_cost = theano.function([x, mask, y], cost, name='f_cost')
grads = tensor.grad(cost, wrt=list(tparams.values()))  # list() needed under Python 3
f_grad = theano.function([x, mask, y], grads, name='f_grad')
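
# Hedged usage sketch (not part of the model code): assuming imdb.prepare_data pads
# a list of variable-length sequences into (x, mask) arrays as in the Theano LSTM
# tutorial, a single minibatch could be evaluated like this:
#   x_b, mask_b, y_b = imdb.prepare_data(X[:batch_size], Y[:batch_size], maxlen=maxlen)
#   print(f_cost(x_b, mask_b, y_b))   # scalar cost on the minibatch
#   print(f_pred(x_b, mask_b))        # predictions for the minibatch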

lr = tensor.scalar(name='lr')
f_grad_shared, f_update = optimizer(lr, tparams, grads,
                                    x, mask, y, cost)
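
# For reference, a minimal optimizer with the same
# (lr, tparams, grads, x, mask, y, cost) -> (f_grad_shared, f_update) interface,
# in the style of the Theano LSTM tutorial.  This is only a sketch of plain SGD;
# the `optimizer` used above may well be adadelta or rmsprop instead.
def sgd(lr, tparams, grads, x, mask, y, cost):
    # Shared buffers that hold the gradients computed on the current minibatch.
    gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
               for k, p in tparams.items()]
    # f_grad_shared: computes the cost and stores the gradients in gshared.
    f_grad_shared = theano.function([x, mask, y], cost,
                                    updates=list(zip(gshared, grads)),
                                    name='sgd_f_grad_shared')
    # f_update: applies one SGD step, p <- p - lr * grad.
    pup = [(p, p - lr * g) for p, g in zip(tparams.values(), gshared)]
    f_update = theano.function([lr], [], updates=pup, name='sgd_f_update')
    return f_grad_shared, f_update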
load_data, prepare_data = imdb.load_data, imdb.prepare_data

print('Loading data')
X, Y, X_test, Y_test = mdn_lstm.load_data(predict=dim_proj)

n_components = 3

model_options['n_components'] = n_components
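
# n_components is the number of Gaussian mixture components the network predicts
# per target.  As a hedged sketch of the standard mixture-density cost (an
# illustration only, not necessarily what mdn_lstm.build_model implements; the
# pi/mu/sigma names are assumptions): for 1-D targets the network outputs mixing
# weights pi (softmax), means mu and standard deviations sigma (exp), and the cost
# is the negative log of the resulting mixture density.
def mdn_negative_log_likelihood(y, pi, mu, sigma):
    # y: (n_samples,) targets; pi, mu, sigma: (n_samples, n_components).
    y_col = y.dimshuffle(0, 'x')  # broadcast targets against the components
    component = tensor.exp(-0.5 * ((y_col - mu) / sigma) ** 2) \
        / (sigma * numpy.sqrt(2. * numpy.pi))
    # Small constant inside the log for numerical stability.
    return -tensor.log((pi * component).sum(axis=1) + 1e-8).mean()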

print('Building model')
# This creates the initial parameters as numpy ndarrays.
# Dict: name (string) -> numpy ndarray


# params = init_params(model_options), unfolded step by step below
params = OrderedDict()
# embedding
randn = numpy.random.rand(model_options['n_words'],