def test_sequence_generator():
    """Smoke-test SequenceGenerator with real-valued outputs.

    Disclaimer: only output *shapes* are checked here, never values.
    """
    n_steps = 10
    batch_size = 30
    dim = 20
    output_dim = 1

    # Gated recurrent transition with orthogonally initialized weights.
    transition = GatedRecurrent(name="transition", activation=Tanh(),
                                dim=dim, weights_init=Orthogonal())
    readout = LinearReadout(readout_dim=output_dim, source_names=["states"],
                            emitter=TestEmitter(name="emitter"),
                            name="readout")
    generator = SequenceGenerator(readout, transition,
                                  weights_init=IsotropicGaussian(0.01),
                                  biases_init=Constant(0),
                                  name="generator")
    generator.initialize()

    # The cost graph should yield a (time, batch) matrix of per-step costs.
    y = tensor.tensor3('y')
    mask = tensor.matrix('mask')
    costs = generator.cost(y, mask)
    assert costs.ndim == 2
    cost_fn = theano.function([y, mask], [costs])
    costs_val = cost_fn(
        numpy.zeros((n_steps, batch_size, output_dim), dtype=floatX),
        numpy.ones((n_steps, batch_size), dtype=floatX))[0]
    assert costs_val.shape == (n_steps, batch_size)

    # Generation should produce states, outputs and costs for n_steps steps.
    generated = generator.generate(iterate=True, batch_size=batch_size,
                                   n_steps=n_steps)
    states, outputs, costs = [variable.eval() for variable in generated]
    assert states.shape == (n_steps, batch_size, dim)
    assert outputs.shape == (n_steps, batch_size, output_dim)
    assert costs.shape == (n_steps, batch_size)
# NOTE(review): this function is a token-for-token duplicate of the
# `test_sequence_generator` defined immediately above in this file. At import
# time this later definition silently shadows the earlier one, so only one of
# the two is ever collected/run. Consider deleting one copy.
def test_sequence_generator():
    # Disclaimer: here we only check shapes, not values.
    output_dim = 1
    dim = 20
    batch_size = 30
    n_steps = 10
    transition = GatedRecurrent(
        name="transition", activation=Tanh(), dim=dim,
        weights_init=Orthogonal())
    generator = SequenceGenerator(
        LinearReadout(readout_dim=output_dim, source_names=["states"],
                      emitter=TestEmitter(name="emitter"), name="readout"),
        transition,
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
        name="generator")
    generator.initialize()
    # Cost graph: expected to be a (time, batch) matrix of per-step costs.
    y = tensor.tensor3('y')
    mask = tensor.matrix('mask')
    costs = generator.cost(y, mask)
    assert costs.ndim == 2
    costs_val = theano.function([y, mask], [costs])(
        numpy.zeros((n_steps, batch_size, output_dim), dtype=floatX),
        numpy.ones((n_steps, batch_size), dtype=floatX))[0]
    assert costs_val.shape == (n_steps, batch_size)
    # Sampling path: shapes of generated states/outputs/costs only.
    states, outputs, costs = [
        variable.eval() for variable in generator.generate(
            iterate=True, batch_size=batch_size, n_steps=n_steps)]
    assert states.shape == (n_steps, batch_size, dim)
    assert outputs.shape == (n_steps, batch_size, output_dim)
    assert costs.shape == (n_steps, batch_size)
def test_integer_sequence_generator():
    """Smoke-test SequenceGenerator with integer (softmax) outputs.

    Disclaimer: only output *shapes* and the output dtype are checked,
    never values.
    """
    n_steps = 10
    batch_size = 30
    dim = 20
    readout_dim = 5
    feedback_dim = 3

    transition = GatedRecurrent(name="transition", activation=Tanh(),
                                dim=dim, weights_init=Orthogonal())
    readout = LinearReadout(readout_dim=readout_dim, source_names=["states"],
                            emitter=SoftmaxEmitter(name="emitter"),
                            feedbacker=LookupFeedback(readout_dim,
                                                      feedback_dim),
                            name="readout")
    generator = SequenceGenerator(readout, transition,
                                  weights_init=IsotropicGaussian(0.01),
                                  biases_init=Constant(0),
                                  name="generator")
    generator.initialize()

    # Cost graph over integer targets: (time, batch) matrix of costs.
    y = tensor.lmatrix('y')
    mask = tensor.matrix('mask')
    costs = generator.cost(y, mask)
    assert costs.ndim == 2
    costs_val = theano.function([y, mask], [costs])(
        numpy.zeros((n_steps, batch_size), dtype='int64'),
        numpy.ones((n_steps, batch_size), dtype=floatX))[0]
    assert costs_val.shape == (n_steps, batch_size)

    # Sampling path: the scan node carries RNG updates that must be passed
    # to theano.function, hence the owner/inputs walk below.
    states, outputs, costs = generator.generate(
        iterate=True, batch_size=batch_size, n_steps=n_steps)
    states_val, outputs_val, costs_val = theano.function(
        [], [states, outputs, costs],
        updates=costs.owner.inputs[0].owner.tag.updates)()
    assert states_val.shape == (n_steps, batch_size, dim)
    assert outputs_val.shape == (n_steps, batch_size)
    assert outputs_val.dtype == 'int64'
    assert costs_val.shape == (n_steps, batch_size)
# NOTE(review): this function is a token-for-token duplicate of the
# `test_integer_sequence_generator` defined immediately above in this file.
# At import time this later definition silently shadows the earlier one, so
# only one of the two is ever collected/run. Consider deleting one copy.
def test_integer_sequence_generator():
    # Disclaimer: here we only check shapes, not values.
    readout_dim = 5
    feedback_dim = 3
    dim = 20
    batch_size = 30
    n_steps = 10
    transition = GatedRecurrent(
        name="transition", activation=Tanh(), dim=dim,
        weights_init=Orthogonal())
    generator = SequenceGenerator(
        LinearReadout(readout_dim=readout_dim, source_names=["states"],
                      emitter=SoftmaxEmitter(name="emitter"),
                      feedbacker=LookupFeedback(readout_dim, feedback_dim),
                      name="readout"),
        transition,
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
        name="generator")
    generator.initialize()
    # Cost graph over integer targets.
    y = tensor.lmatrix('y')
    mask = tensor.matrix('mask')
    costs = generator.cost(y, mask)
    assert costs.ndim == 2
    costs_val = theano.function([y, mask], [costs])(
        numpy.zeros((n_steps, batch_size), dtype='int64'),
        numpy.ones((n_steps, batch_size), dtype=floatX))[0]
    assert costs_val.shape == (n_steps, batch_size)
    # Sampling path; the updates are pulled off the scan node that produced
    # `costs` (RNG state updates, presumably -- confirm against blocks docs).
    states, outputs, costs = generator.generate(
        iterate=True, batch_size=batch_size, n_steps=n_steps)
    states_val, outputs_val, costs_val = theano.function(
        [], [states, outputs, costs],
        updates=costs.owner.inputs[0].owner.tag.updates)()
    assert states_val.shape == (n_steps, batch_size, dim)
    assert outputs_val.shape == (n_steps, batch_size)
    assert outputs_val.dtype == 'int64'
    assert costs_val.shape == (n_steps, batch_size)
def main(mode, save_path, steps, time_budget, reset):
    """Train or sample from a Markov-chain RNN via the PyLearn2 wrappers.

    Parameters:
        mode: "train" builds/loads a Pylearn2Model and runs SGD training;
              "sample" loads the model from save_path and prints statistics
              of a generated sequence.
        save_path: model checkpoint path (read and written).
        steps: number of generation steps in "sample" mode.
        time_budget: passed to train.main_loop to bound training time.
        reset: if True, ignore an existing checkpoint and start fresh.
    """
    num_states = ChainDataset.num_states
    if mode == "train":
        # Experiment configuration
        rng = numpy.random.RandomState(1)
        batch_size = 50
        seq_len = 100
        dim = 10
        feedback_dim = 8

        # Build the bricks and initialize them
        transition = GatedRecurrent(name="transition", activation=Tanh(),
                                    dim=dim)
        generator = SequenceGenerator(
            LinearReadout(readout_dim=num_states, source_names=["states"],
                          emitter=SoftmaxEmitter(name="emitter"),
                          feedbacker=LookupFeedback(
                              num_states, feedback_dim, name='feedback'),
                          name="readout"),
            transition,
            weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
            name="generator")
        # Override the transition's recurrent init after the generator has
        # pushed its own config, before initialize() -- order matters here.
        generator.push_initialization_config()
        transition.weights_init = Orthogonal()
        generator.initialize()

        logger.info("Parameters:\n" + pprint.pformat(
            [(key, value.get_value().shape) for key, value
             in Selector(generator).get_params().items()],
            width=120))
        logger.info("Markov chain entropy: {}".format(
            ChainDataset.entropy))
        logger.info("Expected min error: {}".format(
            -ChainDataset.entropy * seq_len * batch_size))

        # Resume from an existing checkpoint unless a reset was requested.
        if os.path.isfile(save_path) and not reset:
            model = Pylearn2Model.load(save_path)
        else:
            model = Pylearn2Model(generator)

        # Build the cost computation graph.
        # Note: would be probably nicer to make cost part of the model.
        x = tensor.ltensor3('x')
        cost = Pylearn2Cost(model.brick.cost(x[:, :, 0]).sum())

        dataset = ChainDataset(rng, seq_len)
        sgd = SGD(learning_rate=0.0001, cost=cost,
                  batch_size=batch_size, batches_per_iter=10,
                  monitoring_dataset=dataset,
                  monitoring_batch_size=batch_size,
                  monitoring_batches=1,
                  learning_rule=Pylearn2LearningRule(
                      SGDLearningRule(),
                      dict(training_objective=cost.cost)))
        train = Pylearn2Train(dataset, model, algorithm=sgd,
                              save_path=save_path, save_freq=10)
        train.main_loop(time_budget=time_budget)
    elif mode == "sample":
        model = Pylearn2Model.load(save_path)
        generator = model.brick

        sample = ComputationGraph(generator.generate(
            n_steps=steps, batch_size=1, iterate=True)).function()
        # Drop the batch axis (batch_size=1).
        states, outputs, costs = [data[:, 0] for data in sample()]

        numpy.set_printoptions(precision=3, suppress=True)
        print("Generation cost:\n{}".format(costs.sum()))

        # Empirical state frequencies vs the chain's equilibrium distribution.
        freqs = numpy.bincount(outputs).astype(floatX)
        freqs /= freqs.sum()
        print("Frequencies:\n {} vs {}".format(freqs,
                                               ChainDataset.equilibrium))

        # Empirical transition frequencies vs the true transition matrix.
        trans_freqs = numpy.zeros((num_states, num_states), dtype=floatX)
        for a, b in zip(outputs, outputs[1:]):
            trans_freqs[a, b] += 1
        trans_freqs /= trans_freqs.sum(axis=1)[:, None]
        print("Transition frequencies:\n{}\nvs\n{}".format(
            trans_freqs, ChainDataset.trans_prob))
    else:
        assert False
def main(mode, save_path, steps, num_batches):
    """Train a Markov-chain RNN generator or sample from a saved one.

    Parameters:
        mode: "train" to fit a new model, "sample" to generate from a
              checkpoint.
        save_path: checkpoint path (written during training, read when
              sampling).
        steps: number of generation steps in "sample" mode.
        num_batches: training stops after this many batches in "train" mode.
    """
    num_states = MarkovChainDataset.num_states
    if mode == "train":
        # Experiment configuration
        rng = numpy.random.RandomState(1)
        batch_size = 50
        seq_len = 100
        dim = 10
        feedback_dim = 8

        # Build the bricks and initialize them
        transition = GatedRecurrent(name="transition", dim=dim,
                                    activation=Tanh())
        generator = SequenceGenerator(
            Readout(readout_dim=num_states, source_names=["states"],
                    emitter=SoftmaxEmitter(name="emitter"),
                    feedback_brick=LookupFeedback(
                        num_states, feedback_dim, name='feedback'),
                    name="readout"),
            transition,
            weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
            name="generator")
        # The recurrent weights are overridden after the generator pushes its
        # initialization config, before initialize() -- order matters here.
        generator.push_initialization_config()
        transition.weights_init = Orthogonal()
        generator.initialize()

        # Give an idea of what's going on.
        logger.info("Parameters:\n" + pprint.pformat(
            [(key, value.get_value().shape) for key, value
             in Selector(generator).get_params().items()],
            width=120))
        logger.info("Markov chain entropy: {}".format(
            MarkovChainDataset.entropy))
        logger.info("Expected min error: {}".format(
            -MarkovChainDataset.entropy * seq_len))

        # Build the cost computation graph.
        x = tensor.lmatrix('data')
        cost = aggregation.mean(generator.cost_matrix(x[:, :]).sum(),
                                x.shape[1])
        cost.name = "sequence_log_likelihood"

        algorithm = GradientDescent(
            cost=cost,
            params=list(Selector(generator).get_params().values()),
            step_rule=Scale(0.001))
        main_loop = MainLoop(
            algorithm=algorithm,
            data_stream=DataStream(
                MarkovChainDataset(rng, seq_len),
                iteration_scheme=ConstantScheme(batch_size)),
            model=Model(cost),
            extensions=[FinishAfter(after_n_batches=num_batches),
                        TrainingDataMonitoring([cost], prefix="this_step",
                                               after_batch=True),
                        TrainingDataMonitoring([cost], prefix="average",
                                               every_n_batches=100),
                        Checkpoint(save_path, every_n_batches=500),
                        Printing(every_n_batches=100)])
        main_loop.run()
    elif mode == "sample":
        # FIX: close the checkpoint file deterministically instead of leaking
        # the handle returned by the bare open() call.
        with open(save_path, "rb") as source:
            main_loop = cPickle.load(source)
        # NOTE(review): `main_loop.model` is used directly as the generator
        # brick below; if Model wraps the cost graph rather than the brick,
        # this likely needs something like
        # `main_loop.model.get_top_bricks()[0]` -- confirm against blocks API.
        generator = main_loop.model

        sample = ComputationGraph(generator.generate(
            n_steps=steps, batch_size=1,
            iterate=True)).get_theano_function()
        # Drop the batch axis (batch_size=1).
        states, outputs, costs = [data[:, 0] for data in sample()]

        numpy.set_printoptions(precision=3, suppress=True)
        print("Generation cost:\n{}".format(costs.sum()))

        # Empirical state frequencies vs the chain's equilibrium distribution.
        freqs = numpy.bincount(outputs).astype(theano.config.floatX)
        freqs /= freqs.sum()
        print("Frequencies:\n {} vs {}".format(
            freqs, MarkovChainDataset.equilibrium))

        # Empirical transition frequencies vs the true transition matrix.
        trans_freqs = numpy.zeros((num_states, num_states),
                                  dtype=theano.config.floatX)
        for a, b in zip(outputs, outputs[1:]):
            trans_freqs[a, b] += 1
        trans_freqs /= trans_freqs.sum(axis=1)[:, None]
        print("Transition frequencies:\n{}\nvs\n{}".format(
            trans_freqs, MarkovChainDataset.trans_prob))
    else:
        assert False
# NOTE(review): this chunk is a fragment -- the first line below is the tail
# of a SequenceGenerator(...) call whose opening lines are not in this file
# chunk. It is not valid standalone Python; the layout below reconstructs the
# apparent intent. Also note the Python 2 `print` statement.
    transition=transition, name="generator")
# Configure initialization: generator-level schemes are pushed down to
# children, then the transition's bias init is overridden before initialize().
generator.weights_init = IsotropicGaussian(0.01)
generator.biases_init = Constant(0.)
generator.push_initialization_config()
generator.transition.biases_init = IsotropicGaussian(0.01, 1)
generator.transition.push_initialization_config()
#steps = 2048
steps = 8
n_samples = 1
# Build the sampling computation graph and compile it.
sample = ComputationGraph(
    generator.generate(n_steps=steps, batch_size=n_samples, iterate=True))
sample_fn = sample.get_theano_function()
generator.initialize()
#-2
outputs = sample_fn()[0]
print outputs
# Build a dict of zero-initialized shared variables, one per transition
# output state (presumably to seed later stateful generation -- confirm).
states = {}
states = generator.transition.apply.outputs
states = {name: shared_floatx_zeros((batch_size, hidden_size_recurrent))
          for name in states}
def main():
    """Train or sample a Markov-chain RNN using the Groundhog wrappers.

    Command-line driven: `mode` selects training vs sampling, `prefix` names
    the model/timing/state files, `--steps` sets the sample length.
    """
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

    parser = argparse.ArgumentParser(
        "Case study of generating a Markov chain with RNN.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "mode", choices=["train", "sample"],
        help="The mode to run. Use `train` to train a new model"
             " and `sample` to sample a sequence generated by an"
             " existing one.")
    parser.add_argument(
        "prefix", default="sine",
        help="The prefix for model, timing and state files")
    parser.add_argument(
        "--steps", type=int, default=100,
        help="Number of steps to plot")
    args = parser.parse_args()

    dim = 10
    num_states = ChainIterator.num_states
    feedback_dim = 8

    transition = GatedRecurrent(name="transition", activation=Tanh(),
                                dim=dim)
    generator = SequenceGenerator(
        LinearReadout(readout_dim=num_states, source_names=["states"],
                      emitter=SoftmaxEmitter(name="emitter"),
                      feedbacker=LookupFeedback(num_states, feedback_dim,
                                                name='feedback'),
                      name="readout"),
        transition,
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
        name="generator")
    # Allocate (but do not yet initialize) parameters so shapes can be logged.
    generator.allocate()
    logger.debug("Parameters:\n" + pprint.pformat(
        [(key, value.get_value().shape) for key, value
         in Selector(generator).get_params().items()],
        width=120))

    if args.mode == "train":
        rng = numpy.random.RandomState(1)
        batch_size = 50

        # Push init config first, then override the recurrent weights.
        generator.push_initialization_config()
        transition.weights_init = Orthogonal()
        generator.initialize()

        logger.debug("transition.weights_init={}".format(
            transition.weights_init))

        cost = generator.cost(tensor.lmatrix('x')).sum()
        gh_model = GroundhogModel(generator, cost)
        state = GroundhogState(args.prefix, batch_size,
                               learning_rate=0.0001).as_dict()
        data = ChainIterator(rng, 100, batch_size)
        trainer = SGD(gh_model, state, data)
        main_loop = MainLoop(data, None, None, gh_model, trainer, state,
                             None)
        main_loop.main()
    elif args.mode == "sample":
        # NOTE(review): `args.prefix + "model.npz"` concatenates with no
        # separator (e.g. "sinemodel.npz"); verify that this matches the
        # filenames GroundhogState actually writes.
        load_params(generator, args.prefix + "model.npz")

        sample = ComputationGraph(generator.generate(
            n_steps=args.steps, batch_size=1, iterate=True)).function()
        # Drop the batch axis (batch_size=1).
        states, outputs, costs = [data[:, 0] for data in sample()]

        numpy.set_printoptions(precision=3, suppress=True)
        print("Generation cost:\n{}".format(costs.sum()))

        # Empirical state frequencies vs the chain's equilibrium distribution.
        freqs = numpy.bincount(outputs).astype(floatX)
        freqs /= freqs.sum()
        print("Frequencies:\n {} vs {}".format(freqs,
                                               ChainIterator.equilibrium))

        # Empirical transition frequencies vs the true transition matrix.
        trans_freqs = numpy.zeros((num_states, num_states), dtype=floatX)
        for a, b in zip(outputs, outputs[1:]):
            trans_freqs[a, b] += 1
        trans_freqs /= trans_freqs.sum(axis=1)[:, None]
        print("Transition frequencies:\n{}\nvs\n{}".format(
            trans_freqs, ChainIterator.trans_prob))
    else:
        assert False
def main(mode, save_path, num_batches, from_dump):
    """Train or interactively test a word-reversal attention model.

    mode: "train" builds a bidirectional encoder + attention generator over
          One Billion Word characters and runs gradient descent;
          "test" loads a pickled (encoder, fork, lookup, generator) tuple
          from save_path and samples reversals for user-typed sentences.
    num_batches: training length; from_dump: optional dump to resume from.
    """
    if mode == "train":
        # Experiment configuration
        dimension = 100
        readout_dimension = len(char2code)

        # Data processing pipeline: filter long sentences, add reversed
        # targets, batch, pad, then transpose to time-major.
        data_stream = DataStreamMapping(
            mapping=lambda data: tuple(array.T for array in data),
            data_stream=PaddingDataStream(
                BatchDataStream(
                    iteration_scheme=ConstantScheme(10),
                    data_stream=DataStreamMapping(
                        mapping=reverse_words,
                        add_sources=("targets",),
                        data_stream=DataStreamFilter(
                            predicate=lambda data: len(data[0]) <= 100,
                            data_stream=OneBillionWord(
                                "training", [99], char2code,
                                level="character",
                                preprocess=str.lower)
                            .get_default_stream())))))

        # Build the model
        chars = tensor.lmatrix("features")
        chars_mask = tensor.matrix("features_mask")
        targets = tensor.lmatrix("targets")
        targets_mask = tensor.matrix("targets_mask")

        encoder = Bidirectional(
            GatedRecurrent(dim=dimension, activation=Tanh()),
            weights_init=Orthogonal())
        encoder.initialize()
        # Fork feeds each non-mask input sequence of the recurrent prototype.
        fork = Fork([name for name in encoder.prototype.apply.sequences
                     if name != 'mask'],
                    weights_init=IsotropicGaussian(0.1),
                    biases_init=Constant(0))
        fork.input_dim = dimension
        fork.fork_dims = {name: dimension for name in fork.fork_names}
        fork.initialize()
        lookup = LookupTable(readout_dimension, dimension,
                             weights_init=IsotropicGaussian(0.1))
        lookup.initialize()
        transition = Transition(activation=Tanh(), dim=dimension,
                                attended_dim=2 * dimension,
                                name="transition")
        attention = SequenceContentAttention(
            state_names=transition.apply.states,
            match_dim=dimension, name="attention")
        readout = LinearReadout(
            readout_dim=readout_dimension, source_names=["states"],
            emitter=SoftmaxEmitter(name="emitter"),
            feedbacker=LookupFeedback(readout_dimension, dimension),
            name="readout")
        generator = SequenceGenerator(
            readout=readout, transition=transition, attention=attention,
            weights_init=IsotropicGaussian(0.1), biases_init=Constant(0),
            name="generator")
        # Override recurrent init after pushing config, before initialize().
        generator.push_initialization_config()
        transition.weights_init = Orthogonal()
        generator.initialize()
        bricks = [encoder, fork, lookup, generator]

        # Give an idea of what's going on
        params = Selector(bricks).get_params()
        logger.info("Parameters:\n" + pprint.pformat(
            [(key, value.get_value().shape) for key, value
             in params.items()],
            width=120))

        # Build the cost computation graph
        batch_cost = generator.cost(
            targets, targets_mask,
            attended=encoder.apply(
                **dict_union(fork.apply(lookup.lookup(chars),
                                        return_dict=True),
                             mask=chars_mask)),
            attended_mask=chars_mask).sum()
        batch_size = named_copy(chars.shape[1], "batch_size")
        cost = aggregation.mean(batch_cost, batch_size)
        cost.name = "sequence_log_likelihood"
        logger.info("Cost graph is built")

        # Fetch variables useful for debugging
        max_length = named_copy(chars.shape[0], "max_length")
        cost_per_character = named_copy(
            aggregation.mean(batch_cost, batch_size * max_length),
            "character_log_likelihood")
        cg = ComputationGraph(cost)
        energies = unpack(
            VariableFilter(application=readout.readout,
                           name="output")(cg.variables),
            singleton=True)
        min_energy = named_copy(energies.min(), "min_energy")
        max_energy = named_copy(energies.max(), "max_energy")
        (activations,) = VariableFilter(
            application=generator.transition.apply,
            name="states")(cg.variables)
        mean_activation = named_copy(activations.mean(),
                                     "mean_activation")

        # Define the training algorithm.
        algorithm = GradientDescent(
            cost=cost,
            step_rule=CompositeRule([GradientClipping(10.0),
                                     SteepestDescent(0.01)]))

        # Per-parameter norms and gradient norms are monitored as well.
        observables = [
            cost, min_energy, max_energy, mean_activation,
            batch_size, max_length, cost_per_character,
            algorithm.total_step_norm, algorithm.total_gradient_norm]
        for name, param in params.items():
            observables.append(named_copy(param.norm(2), name + "_norm"))
            observables.append(named_copy(
                algorithm.gradients[param].norm(2),
                name + "_grad_norm"))

        main_loop = MainLoop(
            model=bricks, data_stream=data_stream, algorithm=algorithm,
            extensions=([LoadFromDump(from_dump)] if from_dump else []) + [
                Timing(),
                TrainingDataMonitoring(observables,
                                       after_every_batch=True),
                TrainingDataMonitoring(observables, prefix="average",
                                       every_n_batches=10),
                # Stop early if the gradient norm goes NaN.
                FinishAfter(after_n_batches=num_batches).add_condition(
                    "after_batch",
                    lambda log: math.isnan(
                        log.current_row.total_gradient_norm)),
                Plot(os.path.basename(save_path),
                     [["average_" + cost.name],
                      ["average_" + cost_per_character.name]],
                     every_n_batches=10),
                SerializeMainLoop(save_path, every_n_batches=500,
                                  save_separately=["model", "log"]),
                Printing(every_n_batches=1)])
        main_loop.run()
    elif mode == "test":
        with open(save_path, "rb") as source:
            encoder, fork, lookup, generator = dill.load(source)
        logger.info("Model is loaded")

        chars = tensor.lmatrix("features")
        # Generate at most 3x the input length per sample.
        generated = generator.generate(
            n_steps=3 * chars.shape[0], batch_size=chars.shape[1],
            attended=encoder.apply(
                **dict_union(fork.apply(lookup.lookup(chars),
                                        return_dict=True))),
            attended_mask=tensor.ones(chars.shape))
        sample_function = ComputationGraph(generated).get_theano_function()
        logging.info("Sampling function is compiled")

        while True:
            # Python 2-3 compatibility
            line = input("Enter a sentence\n")
            batch_size = int(input("Enter a number of samples\n"))
            # Encode with <S>/</S> markers; unknown chars map to <UNK>.
            encoded_input = [char2code.get(char, char2code["<UNK>"])
                             for char in line.lower().strip()]
            encoded_input = ([char2code['<S>']] + encoded_input +
                             [char2code['</S>']])
            print("Encoder input:", encoded_input)
            target = reverse_words((encoded_input,))[0]
            print("Target: ", target)
            states, samples, glimpses, weights, costs = sample_function(
                numpy.repeat(numpy.array(encoded_input)[:, None],
                             batch_size, axis=1))

            # Rank the samples by cost; truncate each at its </S> marker.
            messages = []
            for i in range(samples.shape[1]):
                sample = list(samples[:, i])
                try:
                    true_length = sample.index(char2code['</S>']) + 1
                except ValueError:
                    true_length = len(sample)
                sample = sample[:true_length]
                cost = costs[:true_length, i].sum()
                message = "({})".format(cost)
                message += "".join(code2char[code] for code in sample)
                if sample == target:
                    message += " CORRECT!"
                messages.append((cost, message))
            messages.sort(key=lambda tuple_: -tuple_[0])
            for _, message in messages:
                print(message)
# NOTE(review): this chunk is a fragment -- the `if` below originally sat
# inside a loop over parameter names (`k`) that is not in this file chunk,
# and the code uses Python 2 `print` statements. Layout below reconstructs
# the apparent intent; it is not valid standalone Python.
# Remap legacy parameter paths to the new gmm_emitter hierarchy.
if '/generator/readout/emitter/mlp/' in k:
    v = parameters2.pop(k)
    parameters2[k.replace(
        '/generator/readout/emitter/mlp/',
        '/generator/readout/emitter/gmm_emitter/gmmmlp/mlp/')] = v
model.set_parameter_values(parameters2)
# import ipdb
# ipdb.set_trace()
#print function([f0, sp, voiced], cost_matrix, updates = extra_updates)(x_tr[0],x_tr[1],x_tr[2])
#generator.generate(n_steps=steps, batch_size=n_samples, iterate=True, **states)
#states = {}
# Compile the sampling graph and draw samples.
sample = ComputationGraph(generator.generate(
    n_steps=steps, batch_size=n_samples, iterate=True))
sample_fn = sample.get_theano_function()
# [-2] picks one of the generate() outputs -- presumably the emitted
# sequence; confirm against the generator's output order.
outputs_bp = sample_fn()[-2]
for this_sample in range(n_samples):
    print "Iteration: ", this_sample
    outputs = outputs_bp
    # Last two feature columns appear to be f0 and voiced flags; the rest
    # are the spectral features -- TODO confirm feature layout.
    sampled_f0 = outputs[:, :, -2]
    sampled_voiced = outputs[:, :, -1]
    print sampled_voiced.mean()
    print sampled_f0.max(), sampled_f0.min()
    outputs = outputs[:, :, :-2]
def __init__(self, config, vocab_size):
    """Build a context-conditioned answer generator.

    Encodes a token `context` with a bidirectional LSTM stack, then drives
    an attention-based GatedRecurrent sequence generator over it to produce
    an `answer`. Sets self.predictions, self.sgd_cost and the monitor lists.

    config: experiment configuration object (sizes, dropout, init schemes).
    vocab_size: number of entries in the shared vocabulary.
    """
    context = tensor.imatrix('context')
    context_mask = tensor.imatrix('context_mask')
    answer = tensor.imatrix('answer')
    answer_mask = tensor.imatrix('answer_mask')

    bricks = []

    # Transpose everything to time-major layout.
    context = context.dimshuffle(1, 0)
    context_mask = context_mask.dimshuffle(1, 0)
    answer = answer.dimshuffle(1, 0)
    answer_mask = answer_mask.dimshuffle(1, 0)

    # Bag-of-words over the context, used to mask the output softmax.
    context_bag = to_bag(context, vocab_size)

    # Embed questions and context
    embed = LookupTable(vocab_size, config.embed_size, name='embed')
    embed.weights_init = IsotropicGaussian(0.01)
    #embeddings_initial_value = init_embedding_table(filename='embeddings/vocab_embeddings.txt')
    #embed.weights_init = Constant(embeddings_initial_value)

    # Calculate context encoding (concatenate layer1)
    cembed = embed.apply(context)
    clstms, chidden_list = make_bidir_lstm_stack(
        cembed, config.embed_size,
        context_mask.astype(theano.config.floatX),
        config.ctx_lstm_size, config.ctx_skip_connections, 'ctx')
    bricks = bricks + clstms
    if config.ctx_skip_connections:
        cenc_dim = 2 * sum(config.ctx_lstm_size)  # 2 : fw & bw
        cenc = tensor.concatenate(chidden_list, axis=2)
    else:
        cenc_dim = 2 * config.ctx_lstm_size[-1]
        cenc = tensor.concatenate(chidden_list[-2:], axis=2)
    cenc.name = 'cenc'

    # Build the encoder bricks
    transition = GatedRecurrent(activation=Tanh(),
                                dim=config.generator_lstm_size,
                                name="transition")
    attention = SequenceContentAttention(
        state_names=transition.apply.states,
        attended_dim=cenc_dim,
        match_dim=config.generator_lstm_size,
        name="attention")
    readout = Readout(
        readout_dim=vocab_size,
        source_names=[transition.apply.states[0],
                      attention.take_glimpses.outputs[0]],
        # The emitter only allows words that occur in the context.
        emitter=MaskedSoftmaxEmitter(context_bag=context_bag,
                                     name='emitter'),
        feedback_brick=LookupFeedback(vocab_size, config.feedback_size),
        name="readout")
    generator = SequenceGenerator(readout=readout,
                                  transition=transition,
                                  attention=attention,
                                  name="generator")

    cost = generator.cost(
        answer, answer_mask.astype(theano.config.floatX),
        attended=cenc,
        attended_mask=context_mask.astype(theano.config.floatX),
        name="cost")
    # generate() returns several sequences; [1] picks the emitted outputs
    # (presumably -- confirm against the generator's output order).
    self.predictions = generator.generate(
        n_steps=7, batch_size=config.batch_size,
        attended=cenc,
        attended_mask=context_mask.astype(theano.config.floatX),
        iterate=True)[1]

    # Apply dropout
    cg = ComputationGraph([cost])
    if config.w_noise > 0:
        noise_vars = VariableFilter(roles=[WEIGHT])(cg)
        cg = apply_noise(cg, noise_vars, config.w_noise)
    if config.dropout > 0:
        cg = apply_dropout(cg, chidden_list, config.dropout)
    [cost_reg] = cg.outputs

    # Other stuff
    cost.name = 'cost'
    cost_reg.name = 'cost_reg'

    self.sgd_cost = cost_reg
    self.monitor_vars = [[cost_reg]]
    self.monitor_vars_valid = [[cost_reg]]

    # initialize new stuff manually (change!)
    generator.weights_init = IsotropicGaussian(0.01)
    generator.biases_init = Constant(0)
    generator.push_allocation_config()
    generator.push_initialization_config()
    transition.weights_init = Orthogonal()
    generator.initialize()

    # Initialize bricks
    embed.initialize()
    for brick in bricks:
        brick.weights_init = config.weights_init
        brick.biases_init = config.biases_init
        brick.initialize()
def main(config): vocab_src, _ = text_to_dict([config['train_src'], config['dev_src'], config['test_src']]) vocab_tgt, cabvo = text_to_dict([config['train_tgt'], config['dev_tgt']]) # Create Theano variables logger.info('Creating theano variables') source_sentence = tensor.lmatrix('source') source_sentence_mask = tensor.matrix('source_mask') target_sentence = tensor.lmatrix('target') target_sentence_mask = tensor.matrix('target_mask') source_sentence.tag.test_value = [[13, 20, 0, 20, 0, 20, 0], [1, 4, 8, 4, 8, 4, 8],] source_sentence_mask.tag.test_value = [[0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1],] target_sentence.tag.test_value = [[0,1,1,5], [2,0,1,0],] target_sentence_mask.tag.test_value = [[0,1,1,0], [1,1,1,0],] logger.info('Building RNN encoder-decoder') ### Building Encoder embedder = LookupTable( length=len(vocab_src), dim=config['embed_src'], weights_init=IsotropicGaussian(), biases_init=Constant(0.0), name='embedder') transformer = Linear( config['embed_src'], config['hidden_src']*4, weights_init=IsotropicGaussian(), biases_init=Constant(0.0), name='transformer') lstminit = np.asarray([0.0,]*config['hidden_src']+[0.0,]*config['hidden_src']+[1.0,]*config['hidden_src']+[0.0,]*config['hidden_src']) encoder = Bidirectional( LSTM( dim=config['hidden_src'], weights_init=IsotropicGaussian(0.01), biases_init=Constant(lstminit)), name='encoderBiLSTM' ) encoder.prototype.weights_init = Orthogonal() ### Building Decoder lstminit = np.asarray([0.0,]*config['hidden_tgt']+[0.0,]*config['hidden_tgt']+[1.0,]*config['hidden_tgt']+[0.0,]*config['hidden_tgt']) transition = LSTM2GO( attended_dim=config['hidden_tgt'], dim=config['hidden_tgt'], weights_init=IsotropicGaussian(0.01), biases_init=Constant(lstminit), name='decoderLSTM') attention = SequenceContentAttention( state_names=transition.apply.states, # default activation is Tanh state_dims=[config['hidden_tgt']], attended_dim=config['hidden_src']*2, match_dim=config['hidden_tgt'], name="attention") readout = Readout( 
source_names=['states', 'feedback', attention.take_glimpses.outputs[0]], readout_dim=len(vocab_tgt), emitter = SoftmaxEmitter( name='emitter'), feedback_brick = LookupFeedback( num_outputs=len(vocab_tgt), feedback_dim=config['embed_tgt'], name='feedback'), post_merge=InitializableFeedforwardSequence([ Bias(dim=config['hidden_tgt'], name='softmax_bias').apply, Linear(input_dim=config['hidden_tgt'], output_dim=config['embed_tgt'], use_bias=False, name='softmax0').apply, Linear(input_dim=config['embed_tgt'], name='softmax1').apply]), merged_dim=config['hidden_tgt']) decoder = SequenceGenerator( readout=readout, transition=transition, attention=attention, weights_init=IsotropicGaussian(0.01), biases_init=Constant(0), name="generator", fork=Fork( [name for name in transition.apply.sequences if name != 'mask'], prototype=Linear()), add_contexts=True) decoder.transition.weights_init = Orthogonal() #printchildren(encoder, 1) # Initialize model logger.info('Initializing model') embedder.initialize() transformer.initialize() encoder.initialize() decoder.initialize() # Apply model embedded = embedder.apply(source_sentence) tansformed = transformer.apply(embedded) encoded = encoder.apply(tansformed)[0] generated = decoder.generate( n_steps=2*source_sentence.shape[1], batch_size=source_sentence.shape[0], attended = encoded.dimshuffle(1,0,2), attended_mask=tensor.ones(source_sentence.shape).T ) print 'Generated: ', generated # generator_generate_outputs #samples = generated[1] # For GRU samples = generated[2] # For LSTM samples.name = 'samples' #samples_cost = generated[4] # For GRU samples_cost = generated[5] # For LSTM samples_cost = 'sampling_cost' cost = decoder.cost( mask = target_sentence_mask.T, outputs = target_sentence.T, attended = encoded.dimshuffle(1,0,2), attended_mask = source_sentence_mask.T) cost.name = 'target_cost' cost.tag.aggregation_scheme = TakeLast(cost) model = Model(cost) logger.info('Creating computational graph') cg = ComputationGraph(cost) # apply 
dropout for regularization if config['dropout'] < 1.0: # dropout is applied to the output of maxout in ghog logger.info('Applying dropout') dropout_inputs = [x for x in cg.intermediary_variables if x.name == 'maxout_apply_output'] cg = apply_dropout(cg, dropout_inputs, config['dropout']) ######## # Print shapes shapes = [param.get_value().shape for param in cg.parameters] logger.info("Parameter shapes: ") for shape, count in Counter(shapes).most_common(): logger.info(' {:15}: {}'.format(shape, count)) logger.info("Total number of parameters: {}".format(len(shapes))) printchildren(embedder, 1) printchildren(transformer, 1) printchildren(encoder, 1) printchildren(decoder, 1) # Print parameter names # enc_dec_param_dict = merge(Selector(embedder).get_parameters(), Selector(encoder).get_parameters(), Selector(decoder).get_parameters()) # enc_dec_param_dict = merge(Selector(decoder).get_parameters()) # logger.info("Parameter names: ") # for name, value in enc_dec_param_dict.items(): # logger.info(' {:15}: {}'.format(value.get_value().shape, name)) # logger.info("Total number of parameters: {}".format(len(enc_dec_param_dict))) ########## # Training data train_stream = get_train_stream(config, [config['train_src'],], [config['train_tgt'],], vocab_src, vocab_tgt) dev_stream = get_dev_stream( [config['dev_src'],], [config['dev_tgt'],], vocab_src, vocab_tgt) test_stream = get_test_stream([config['test_src'],], vocab_src) # Set extensions logger.info("Initializing extensions") extensions = [ FinishAfter(after_n_batches=config['finish_after']), ProgressBar(), TrainingDataMonitoring([cost], prefix="tra", after_batch=True), DataStreamMonitoring(variables=[cost], data_stream=dev_stream, prefix="dev", after_batch=True), Sampler( model=Model(samples), data_stream=dev_stream, vocab=cabvo, saveto=config['saveto']+'dev', every_n_batches=config['save_freq']), Sampler( model=Model(samples), data_stream=test_stream, vocab=cabvo, saveto=config['saveto']+'test', after_n_batches=1, 
on_resumption=True, before_training=True), Plotter(saveto=config['saveto'], after_batch=True), Printing(after_batch=True), Checkpoint( path=config['saveto'], parameters = cg.parameters, save_main_loop=False, every_n_batches=config['save_freq'])] if BOKEH_AVAILABLE: Plot('Training cost', channels=[['target_cost']], after_batch=True) if config['reload']: extensions.append(Load(path=config['saveto'], load_iteration_state=False, load_log=False)) else: with open(config['saveto']+'.txt', 'w') as f: pass # Set up training algorithm logger.info("Initializing training algorithm") algorithm = GradientDescent(cost=cost, parameters=cg.parameters, step_rule=CompositeRule([StepClipping(config['step_clipping']), eval(config['step_rule'])()]) ) # Initialize main loop logger.info("Initializing main loop") main_loop = MainLoop( model=model, algorithm=algorithm, data_stream=train_stream, extensions=extensions) main_loop.run()
# NOTE(review): this chunk is a fragment -- the first three lines below are
# the tail of a Readout(...) call whose opening lines are not in this file
# chunk, and the final prints are Python 2 statements. The layout below
# reconstructs the apparent intent; it is not valid standalone Python.
    post_merge=Identity(),
    merged_dim=dimension,
    name="readout")
# A generator wired with identity initializations, presumably so the
# generated sequence is analytically predictable -- confirm intent.
generator = SequenceGenerator(
    readout=readout,
    transition=transition,
    fork=Fork(['inputs'],
              prototype=Identity()),
    weights_init=initialization.Identity(1.),
    biases_init=initialization.Constant(0.),
    name="generator")
generator.push_initialization_config()
# The doubly-nested `.transition.transition` reaches through a wrapper
# transition to the inner recurrent brick.
generator.transition.transition.weights_init = initialization.Identity(2.)
generator.initialize()
results = generator.generate(n_steps=n_steps,
                             batch_size=1,
                             iterate=True,
                             return_initial_states=True)
results_cg = ComputationGraph(results)
results_tf = results_cg.get_theano_function()
# [1] selects one generate() output; with return_initial_states=True the
# sequence has n_steps + 1 entries.
generated_sequence_t = results_tf()[1]
generated_sequence_t.shape = (n_steps + 1, dimension)
print generated_sequence_t
print generated_sequence
def main():
    """Train or sample a GatedRecurrent sequence generator on a Markov chain.

    ``train`` mode builds a SequenceGenerator with a softmax readout and
    lookup feedback, then optimizes it with Pylearn2's SGD; ``sample`` mode
    loads a saved model, draws a batch of sequences, and compares empirical
    state/transition frequencies to the chain's known distributions.
    """
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

    parser = argparse.ArgumentParser(
        "Case study of generating a Markov chain with RNN.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "mode", choices=["train", "sample"],
        help="The mode to run. Use `train` to train a new model"
             " and `sample` to sample a sequence generated by an"
             " existing one.")
    # NOTE(review): help text says "part" — presumably "path"; also a
    # `default` on a positional argument has no effect without nargs='?'.
    parser.add_argument(
        "save_path", default="sine",
        help="The part to save PyLearn2 model")
    parser.add_argument(
        "--steps", type=int, default=100,
        help="Number of steps to plot")
    parser.add_argument(
        "--reset", action="store_true", default=False,
        help="Start training from scratch")
    args = parser.parse_args()

    num_states = ChainDataset.num_states

    if args.mode == "train":
        # Experiment configuration
        rng = numpy.random.RandomState(1)
        batch_size = 50
        seq_len = 100
        dim = 10
        feedback_dim = 8

        # Build the bricks and initialize them
        transition = GatedRecurrent(name="transition", activation=Tanh(),
                                    dim=dim)
        generator = SequenceGenerator(
            LinearReadout(readout_dim=num_states, source_names=["states"],
                          emitter=SoftmaxEmitter(name="emitter"),
                          feedbacker=LookupFeedback(
                              num_states, feedback_dim, name='feedback'),
                          name="readout"),
            transition,
            weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
            name="generator")
        # Override the transition's recurrent weight init before the
        # generator pushes its own configuration down to children.
        generator.push_initialization_config()
        transition.weights_init = Orthogonal()
        generator.initialize()

        logger.debug("Parameters:\n" + pprint.pformat(
            [(key, value.get_value().shape) for key, value
             in Selector(generator).get_params().items()],
            width=120))
        logger.debug("Markov chain entropy: {}".format(
            ChainDataset.entropy))
        logger.debug("Expected min error: {}".format(
            -ChainDataset.entropy * seq_len * batch_size))

        # Resume from a checkpoint unless told to start over.
        if os.path.isfile(args.save_path) and not args.reset:
            model = Pylearn2Model.load(args.save_path)
        else:
            model = Pylearn2Model(generator)

        # Build the cost computation graph.
        # Note: would be probably nicer to make cost part of the model.
        x = tensor.ltensor3('x')
        cost = Pylearn2Cost(model.brick.cost(x[:, :, 0]).sum())

        dataset = ChainDataset(rng, seq_len)
        sgd = SGD(learning_rate=0.0001, cost=cost,
                  batch_size=batch_size, batches_per_iter=10,
                  monitoring_dataset=dataset,
                  monitoring_batch_size=batch_size,
                  monitoring_batches=1,
                  learning_rule=Pylearn2LearningRule(
                      SGDLearningRule(),
                      dict(training_objective=cost.cost)))
        train = Pylearn2Train(dataset, model, algorithm=sgd,
                              save_path=args.save_path, save_freq=10)
        train.main_loop()
    elif args.mode == "sample":
        model = Pylearn2Model.load(args.save_path)
        generator = model.brick

        # Sample one sequence of args.steps states; drop the batch axis.
        sample = ComputationGraph(generator.generate(
            n_steps=args.steps, batch_size=1, iterate=True)).function()
        states, outputs, costs = [data[:, 0] for data in sample()]

        numpy.set_printoptions(precision=3, suppress=True)
        print("Generation cost:\n{}".format(costs.sum()))

        # Compare empirical state frequencies to the chain's equilibrium.
        freqs = numpy.bincount(outputs).astype(floatX)
        freqs /= freqs.sum()
        print("Frequencies:\n {} vs {}".format(freqs,
                                               ChainDataset.equilibrium))

        # Empirical transition matrix from consecutive output pairs.
        # NOTE(review): a state never visited yields a 0/0 row (nan) here.
        trans_freqs = numpy.zeros((num_states, num_states), dtype=floatX)
        for a, b in zip(outputs, outputs[1:]):
            trans_freqs[a, b] += 1
        trans_freqs /= trans_freqs.sum(axis=1)[:, None]
        print("Transition frequencies:\n{}\nvs\n{}".format(
            trans_freqs, ChainDataset.trans_prob))
    else:
        assert False
class NoLookupDecoder(Initializable):
    """Decoder implementation without an embedding layer or softmax.

    The target sentence is represented directly as a sequence of vectors
    (as defined by the sparse feature map), so readout emits vectors via
    a squared-error cost instead of a categorical distribution.
    """

    def __init__(self, vocab_size, embedding_dim, state_dim, att_dim,
                 maxout_dim, representation_dim,
                 attention_strategy='content',
                 attention_sources='s',
                 readout_sources='sfa',
                 memory='none',
                 memory_size=500,
                 seq_len=50,
                 init_strategy='last', theano_seed=None, **kwargs):
        """Creates a new decoder brick without embedding.

        Args:
            vocab_size (int): Target language vocabulary size
            embedding_dim (int): Size of feedback embedding layer
            state_dim (int): Number of hidden units
            att_dim (int): Size of attention match vector
            maxout_dim (int): Size of maxout layer
            representation_dim (int): Dimension of source annotations
            attention_strategy (string): Which attention should be used
                                         cf. ``_initialize_attention``
            attention_sources (string): Defines the sources used by the
                                        attention model 's' for decoder
                                        states, 'f' for feedback
            readout_sources (string): Defines the sources used in the
                                      readout network. 's' for decoder
                                      states, 'f' for feedback, 'a' for
                                      attention (context vector)
            memory (string): Which external memory should be used
                             (cf. ``_initialize_attention``)
            memory_size (int): Size of the external memory structure
            seq_len (int): Maximum sentence length
            init_strategy (string): How to initialize the RNN state
                                    (cf. ``GRUInitialState``)
            theano_seed: Random seed
        """
        super(NoLookupDecoder, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.state_dim = state_dim
        self.representation_dim = representation_dim
        self.theano_seed = theano_seed

        # Initialize gru with special initial state
        self.transition = GRUInitialState(attended_dim=state_dim,
                                          init_strategy=init_strategy,
                                          dim=state_dim,
                                          activation=Tanh(),
                                          name='decoder')

        # Initialize the attention mechanism; non-positive att_dim falls
        # back to the recurrent state dimensionality.
        att_dim = att_dim if att_dim > 0 else state_dim
        self.attention, src_names = _initialize_attention(
            attention_strategy,
            seq_len,
            self.transition,
            representation_dim,
            att_dim,
            attention_sources,
            readout_sources,
            memory,
            memory_size)

        # Initialize the readout, note that SoftmaxEmitter emits -1 for
        # initial outputs which is used by LookupFeedBackWMT15
        maxout_dim = maxout_dim if maxout_dim > 0 else state_dim
        readout = Readout(
            source_names=src_names,
            readout_dim=embedding_dim,
            emitter=NoLookupEmitter(initial_output=-1,
                                    readout_dim=embedding_dim,
                                    cost_brick=SquaredError()),
            #                       cost_brick=CategoricalCrossEntropy()),
            feedback_brick=TrivialFeedback(output_dim=embedding_dim),
            # NOTE(review): `maxout_dim / 2` assumes Python 2 integer
            # division (Maxout with num_pieces=2 halves the dimension);
            # under Python 3 this would be a float — confirm.
            post_merge=InitializableFeedforwardSequence(
                [Bias(dim=maxout_dim, name='maxout_bias').apply,
                 Maxout(num_pieces=2, name='maxout').apply,
                 Linear(input_dim=maxout_dim / 2, output_dim=embedding_dim,
                        use_bias=False, name='softmax0').apply,
                 Logistic(name='softmax1').apply]),
            merged_dim=maxout_dim)

        # Build sequence generator accordingly
        self.sequence_generator = SequenceGenerator(
            readout=readout,
            transition=self.transition,
            attention=self.attention,
            fork=Fork([name for name in self.transition.apply.sequences
                       if name != 'mask'], prototype=Linear())
        )

        self.children = [self.sequence_generator]

    @application(inputs=['representation', 'representation_mask',
                         'target_sentence_mask', 'target_sentence'],
                 outputs=['cost'])
    def cost(self, representation, representation_mask,
             target_sentence, target_sentence_mask):
        """Return the masked training cost, averaged over the batch.

        Targets arrive batch-major and are transposed to the time-major
        layout expected by ``SequenceGenerator.cost_matrix``.
        """
        target_sentence = target_sentence.T
        target_sentence_mask = target_sentence_mask.T

        # Get the cost matrix
        cost = self.sequence_generator.cost_matrix(**{
            'mask': target_sentence_mask,
            'outputs': target_sentence,
            'attended': representation,
            'attended_mask': representation_mask}
        )

        # Sum over time, average over the batch (mask.shape[1] = batch
        # size after the transpose above).
        return (cost * target_sentence_mask).sum() / \
            target_sentence_mask.shape[1]

    @application
    def generate(self, source_shape, representation, **kwargs):
        """Decode from ``representation``, allowing up to twice the
        source length in output steps."""
        return self.sequence_generator.generate(
            n_steps=2 * source_shape[1],
            batch_size=source_shape[0],
            attended=representation,
            attended_mask=tensor.ones(source_shape).T,
            **kwargs)
generator = SequenceGenerator( readout=readout, transition=transition, fork = Fork(['inputs'], prototype=Identity()), weights_init = initialization.Identity(1.), biases_init = initialization.Constant(0.), name="generator") generator.push_initialization_config() #generator.fork.weights_init = initialization.Identity(1.) generator.transition.transition.weights_init = initialization.Identity(2.) generator.initialize() results = generator.generate(n_steps=n_steps, batch_size=2, iterate=True, return_initial_states = True) results_cg = ComputationGraph(results) results_tf = results_cg.get_theano_function() generated_sequence_t = results_tf()[1] generated_sequence_t.shape=(n_steps+1, dimension) print generated_sequence_t print generated_sequence from blocks.bricks.base import application from blocks.bricks.attention import AbstractAttention class SimpleSequenceAttention(AbstractAttention): """Combines a conditioning sequence and a recurrent transition via
def test_sequence_generator_with_lm():
    """Exercise SequenceGenerator fused with a language model.

    Shapes are checked everywhere; the numeric reference values are
    pinned down by the fixed seeds (1234).
    """
    floatX = theano.config.floatX
    rng = numpy.random.RandomState(1234)

    readout_dim = 5
    feedback_dim = 3
    dim = 20
    batch_size = 30
    n_steps = 10

    transition = GatedRecurrent(dim=dim, activation=Tanh(),
                                weights_init=Orthogonal())
    lm = SequenceGenerator(
        Readout(readout_dim=readout_dim, source_names=["states"],
                emitter=SoftmaxEmitter(theano_seed=1234),
                feedback_brick=LookupFeedback(readout_dim, dim,
                                              name='feedback')),
        SimpleRecurrent(dim, Tanh()),
        name='language_model')
    generator = SequenceGenerator(
        Readout(readout_dim=readout_dim,
                source_names=["states", "lm_states"],
                emitter=SoftmaxEmitter(theano_seed=1234),
                feedback_brick=LookupFeedback(readout_dim, feedback_dim)),
        transition,
        language_model=lm,
        weights_init=IsotropicGaussian(0.1),
        biases_init=Constant(0),
        seed=1234)
    generator.initialize()

    # --- cost_matrix: per-timestep, per-example costs ---
    targets = tensor.lmatrix('y')
    targets.tag.test_value = numpy.zeros((15, batch_size), dtype='int64')
    targets_mask = tensor.matrix('mask')
    targets_mask.tag.test_value = numpy.ones((15, batch_size))
    cost_matrix = generator.cost_matrix(targets, targets_mask)
    assert cost_matrix.ndim == 2

    costs_fun = theano.function([targets, targets_mask], [cost_matrix])
    y_test = rng.randint(readout_dim, size=(n_steps, batch_size))
    m_test = numpy.ones((n_steps, batch_size), dtype=floatX)
    matrix_val = costs_fun(y_test, m_test)[0]
    assert matrix_val.shape == (n_steps, batch_size)
    assert_allclose(matrix_val.sum(), 483.153, rtol=1e-5)

    # --- cost: scalar aggregate of the same quantity ---
    scalar_cost = generator.cost(targets, targets_mask)
    assert scalar_cost.ndim == 0
    scalar_val = theano.function([targets, targets_mask],
                                 scalar_cost)(y_test, m_test)
    assert_allclose(scalar_val, 16.105, rtol=1e-5)

    # --- 'per_sequence_element' AUXILIARY variable attached by cost ---
    cg = ComputationGraph([scalar_cost])
    aux_var_name = '_'.join(
        [generator.name, generator.cost.name, 'per_sequence_element'])
    candidates = VariableFilter(roles=[AUXILIARY])(cg.variables)
    per_element = [var for var in candidates
                   if var.name == aux_var_name][0]
    assert per_element.ndim == 0
    per_element_val = theano.function(
        [targets, targets_mask], [per_element])(y_test, m_test)
    assert_allclose(per_element_val, 1.61051, rtol=1e-5)

    # --- generate: free-running sampling ---
    states, outputs, lm_states, gen_costs = generator.generate(
        iterate=True, batch_size=batch_size, n_steps=n_steps)
    gen_cg = ComputationGraph([states, outputs, gen_costs])
    states_val, outputs_val, gen_costs_val = theano.function(
        [], [states, outputs, gen_costs], updates=gen_cg.updates)()
    assert states_val.shape == (n_steps, batch_size, dim)
    assert outputs_val.shape == (n_steps, batch_size)
    assert outputs_val.dtype == 'int64'
    assert gen_costs_val.shape == (n_steps, batch_size)
    assert_allclose(states_val.sum(), -4.88367, rtol=1e-5)
    assert_allclose(gen_costs_val.sum(), 486.681, rtol=1e-5)
    assert outputs_val.sum() == 627

    # --- cost ignores masked-out positions ---
    short = costs_fun([[1], [2]], [[1], [1]])[0]
    padded = costs_fun([[3, 1], [4, 2], [2, 0]],
                       [[1, 1], [1, 1], [1, 0]])[0]
    assert_allclose(short.sum(), padded[:, 1].sum(), rtol=1e-5)
cost = cost_matrix.sum(axis=0).mean() cost.name = "nll" cg = ComputationGraph(cost) model = Model(cost) transition_matrix = VariableFilter(theano_name_regex="state_to_state")(cg.parameters) for matr in transition_matrix: matr.set_value(0.98 * np.eye(hidden_size_recurrent, dtype=floatX)) readouts = VariableFilter(applications=[generator.readout.readout], name_regex="output")(cg.variables)[0] mean, sigma, corr, weight, penup = emitter.components(readouts) emit = generator.generate( n_steps=400, iterate=True, attended=embed, attended_mask=context_mask, batch_size=embed.shape[1] )[-4] function([x, x_mask, context, context_mask], cost)(x_tr[0], x_tr[1], x_tr[2], x_tr[3]) emit_fn = ComputationGraph(emit).get_theano_function() emit_fn(x_tr[3], x_tr[2])[0].shape min_sigma = sigma.min(axis=(0, 2)).copy(name="sigma_min") mean_sigma = sigma.mean(axis=(0, 2)).copy(name="sigma_mean") max_sigma = sigma.max(axis=(0, 2)).copy(name="sigma_max") min_mean = mean.min(axis=(0, 2)).copy(name="mu_min") mean_mean = mean.mean(axis=(0, 2)).copy(name="mu_mean") max_mean = mean.max(axis=(0, 2)).copy(name="mu_max") min_corr = corr.min().copy(name="corr_min")
def main():
    """Train an RNN to generate a parametric family of 1-d sequences,
    or plot a sequence generated by a trained model.

    ``train`` mode fits a SequenceGenerator (with MSE emitter) via the
    Groundhog SGD loop; ``plot`` mode loads saved parameters, samples a
    sequence for the given `--params`, and plots it against the
    analytical target.
    """
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

    parser = argparse.ArgumentParser(
        "Case study of generating simple 1d sequences with RNN.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "mode", choices=["train", "plot"],
        help="The mode to run. Use `train` to train a new model"
             " and `plot` to plot a sequence generated by an"
             " existing one.")
    parser.add_argument("prefix", default="sine",
                        help="The prefix for model, timing and state files")
    parser.add_argument("--input-noise", type=float, default=0.0,
                        help="Adds Gaussian noise of given intensity to the "
                             " training sequences.")
    parser.add_argument(
        "--function", default="lambda a, x: numpy.sin(a * x)",
        help="An analytical description of the sequence family to learn."
             " The arguments before the last one are considered parameters.")
    parser.add_argument("--steps", type=int, default=100,
                        help="Number of steps to plot")
    parser.add_argument("--params",
                        help="Parameter values for plotting")
    args = parser.parse_args()

    # SECURITY: eval of a command-line string executes arbitrary code;
    # acceptable for a research script, never for untrusted input.
    function = eval(args.function)
    # All arguments except the last (the time step) are free parameters.
    num_params = len(inspect.getargspec(function).args) - 1

    class Emitter(TrivialEmitter):
        @application
        def cost(self, readouts, outputs):
            """Compute MSE."""
            return ((readouts - outputs) ** 2).sum(axis=readouts.ndim - 1)

    transition = GatedRecurrent(
        name="transition", activation=Tanh(), dim=10,
        weights_init=Orthogonal())
    # Wrap the transition so the function parameters condition each step.
    with_params = AddParameters(transition, num_params, "params",
                                name="with_params")
    generator = SequenceGenerator(
        LinearReadout(readout_dim=1, source_names=["states"],
                      emitter=Emitter(name="emitter"),
                      name="readout"),
        with_params,
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
        name="generator")
    generator.allocate()
    logger.debug("Parameters:\n" + pprint.pformat(
        [(key, value.get_value().shape) for key, value
         in Selector(generator).get_params().items()],
        width=120))

    if args.mode == "train":
        seed = 1
        rng = numpy.random.RandomState(seed)
        batch_size = 10

        generator.initialize()

        cost = ComputationGraph(
            generator.cost(tensor.tensor3('x'),
                           params=tensor.matrix("params")).sum())
        cost = apply_noise(cost, cost.inputs, args.input_noise)

        gh_model = GroundhogModel(generator, cost)
        state = GroundhogState(args.prefix, batch_size,
                               learning_rate=0.0001).as_dict()
        data = SeriesIterator(rng, function, 100, batch_size)
        trainer = SGD(gh_model, state, data)
        main_loop = MainLoop(data, None, None, gh_model, trainer,
                             state, None)
        main_loop.load()
        main_loop.main()
    elif args.mode == "plot":
        load_params(generator, args.prefix + "model.npz")

        params = tensor.matrix("params")
        sample = theano.function([params], generator.generate(
            params=params, n_steps=args.steps, batch_size=1))

        # FIX: was numpy.array(map(float, ...)); under Python 3 `map`
        # returns an iterator, which numpy.array wraps as a 0-d object
        # array, breaking the [None, :] indexing below. A list
        # comprehension works identically on Python 2 and 3.
        param_values = numpy.array(
            [float(value) for value in args.params.split()], dtype=floatX)
        states, outputs, _ = sample(param_values[None, :])
        actual = outputs[:, 0, 0]
        desired = numpy.array([function(*(list(param_values) + [T]))
                               for T in range(args.steps)])
        print("MSE: {}".format(((actual - desired) ** 2).sum()))

        pyplot.plot(numpy.hstack([actual[:, None], desired[:, None]]))
        pyplot.show()
    else:
        assert False
def main():
    """Train or sample a character-level RNN language model via Groundhog.

    ``train`` mode builds a SequenceGenerator over `num_chars` symbols
    and carries the recurrent hidden state across batches through a
    shared variable (unless `--reset` is given); ``sample`` mode loads
    saved parameters and prints 10 generated character sequences.
    """
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

    parser = argparse.ArgumentParser(
        "Case study of language modeling with RNN",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "mode", choices=["train", "sample"],
        help="The mode to run. Use `train` to train a new model"
             " and `sample` to sample a sequence generated by an"
             " existing one.")
    parser.add_argument("prefix", default="sine",
                        help="The prefix for model, timing and state files")
    parser.add_argument("state", nargs="?", default="",
                        help="Changes to Groundhog state")
    parser.add_argument("--path", help="Path to a language dataset")
    parser.add_argument("--dict", help="Path to the dataset dictionary")
    # NOTE(review): used below as a boolean (`if not args.restart`) but
    # declared without action="store_true", so the flag requires a value
    # on the command line — confirm intent.
    parser.add_argument("--restart", help="Start anew")
    parser.add_argument("--reset", action="store_true", default=False,
                        help="Reset the hidden state between batches")
    parser.add_argument("--steps", type=int, default=100,
                        help="Number of steps to plot for the 'sample' mode"
                             " OR training sequence length for the 'train' mode.")
    args = parser.parse_args()
    logger.debug("Args:\n" + str(args))

    dim = 200
    num_chars = 50
    transition = GatedRecurrent(name="transition", activation=Tanh(),
                                dim=dim, weights_init=Orthogonal())
    generator = SequenceGenerator(LinearReadout(
        readout_dim=num_chars, source_names=["states"],
        emitter=SoftmaxEmitter(name="emitter"),
        feedbacker=LookupFeedback(num_chars, dim, name='feedback'),
        name="readout"),
        transition,
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
        name="generator")
    generator.allocate()
    logger.debug("Parameters:\n" + pprint.pformat(
        [(key, value.get_value().shape) for key, value
         in Selector(generator).get_params().items()],
        width=120))

    if args.mode == "train":
        batch_size = 1
        seq_len = args.steps

        generator.initialize()

        # Build cost computation graph that uses the saved hidden states.
        # An issue: for Groundhog this is completely transparent, that's
        # why it does not carry the hidden state over the period when
        # validation in done. We should find a way to fix in the future.
        x = tensor.lmatrix('x')
        # Shared variable holding the last hidden state; multiplying by
        # the `reset` scalar zeroes it at sequence boundaries.
        init_states = shared_floatx_zeros((batch_size, dim),
                                          name='init_states')
        reset = tensor.scalar('reset')
        cost = ComputationGraph(
            generator.cost(x, states=init_states * reset).sum())
        # TODO: better search routine
        # Locate the transition's 'states' output variable in the graph
        # so its final value can be fed back into init_states.
        states = [v for v in cost.variables
                  if hasattr(v.tag, 'application_call')
                  and v.tag.application_call.brick == generator.transition
                  and (v.tag.application_call.application ==
                       generator.transition.apply)
                  and v.tag.role == VariableRole.OUTPUT
                  and v.tag.name == 'states']
        assert len(states) == 1
        states = states[0]

        gh_model = GroundhogModel(generator, cost)
        # Monitor bits-per-character alongside the raw cost.
        gh_model.properties.append(
            ('bpc', cost.outputs[0] * numpy.log(2) / seq_len))
        gh_model.properties.append(('mean_init_state', init_states.mean()))
        gh_model.properties.append(('reset', reset))
        if not args.reset:
            # Carry the hidden state over to the next batch.
            gh_model.updates.append((init_states, states[-1]))

        state = GroundhogState(args.prefix, batch_size,
                               learning_rate=0.0001).as_dict()
        # SECURITY: eval of a command-line string; research-script only.
        changes = eval("dict({})".format(args.state))
        state.update(changes)

        def output_format(x, y, reset):
            return dict(x=x[:, None], reset=reset)
        train, valid, test = [
            LMIterator(batch_size=batch_size,
                       use_infinite_loop=mode == 'train',
                       path=args.path,
                       seq_len=seq_len,
                       mode=mode,
                       chunks='chars',
                       output_format=output_format,
                       can_fit=True)
            for mode in ['train', 'valid', 'test']
        ]

        trainer = SGD(gh_model, state, train)
        state['on_nan'] = 'warn'
        state['cutoff'] = 1.
        main_loop = MainLoop(train, valid, None, gh_model, trainer,
                             state, None)
        if not args.restart:
            main_loop.load()
        main_loop.main()
    elif args.mode == "sample":
        load_params(generator, args.prefix + "model.npz")

        chars = numpy.load(args.dict)['unique_chars']

        sample = ComputationGraph(
            generator.generate(n_steps=args.steps,
                               batch_size=10, iterate=True)).function()
        states, outputs, costs = sample()

        for i in range(10):
            print("Generation cost: {}".format(costs[:, i].sum()))
            print("".join([chars[o] for o in outputs[:, i]]))
    else:
        assert False
generator.transition.push_initialization_config() generator.initialize() ############## # Test model ############## cost_matrix = generator.cost_matrix(x, attended = mlp_context.apply(context)) cost = cost_matrix.mean() cost.name = "nll" emit = generator.generate( attended = mlp_context.apply(context), n_steps = context.shape[0], batch_size = context.shape[1], iterate = True )[-4] cg = ComputationGraph(cost) model = Model(cost) ################# # Algorithm ################# n_batches = 139#139*16 algorithm = GradientDescent( cost=cost, parameters=cg.parameters, step_rule=CompositeRule([StepClipping(10.0), Adam(lr)]))
def test_sequence_generator_with_lm():
    """Check a language-model-conditioned SequenceGenerator end to end:
    cost_matrix, cost, the auxiliary per-element cost, generation, and
    mask handling. Reference numbers follow from the fixed 1234 seeds."""
    floatX = theano.config.floatX
    rng = numpy.random.RandomState(1234)

    readout_dim = 5
    feedback_dim = 3
    dim = 20
    batch_size = 30
    n_steps = 10

    transition = GatedRecurrent(dim=dim, activation=Tanh(),
                                weights_init=Orthogonal())
    language_model = SequenceGenerator(
        Readout(readout_dim=readout_dim,
                source_names=["states"],
                emitter=SoftmaxEmitter(theano_seed=1234),
                feedback_brick=LookupFeedback(readout_dim, dim,
                                              name='feedback')),
        SimpleRecurrent(dim, Tanh()),
        name='language_model')
    generator = SequenceGenerator(
        Readout(readout_dim=readout_dim,
                source_names=["states", "lm_states"],
                emitter=SoftmaxEmitter(theano_seed=1234),
                feedback_brick=LookupFeedback(readout_dim, feedback_dim)),
        transition,
        language_model=language_model,
        weights_init=IsotropicGaussian(0.1),
        biases_init=Constant(0),
        seed=1234)
    generator.initialize()

    # cost_matrix: one cost per (time step, batch element).
    y = tensor.lmatrix('y')
    y.tag.test_value = numpy.zeros((15, batch_size), dtype='int64')
    mask = tensor.matrix('mask')
    mask.tag.test_value = numpy.ones((15, batch_size))
    cost_matrix = generator.cost_matrix(y, mask)
    assert cost_matrix.ndim == 2

    compute_costs = theano.function([y, mask], [cost_matrix])
    sampled_targets = rng.randint(readout_dim, size=(n_steps, batch_size))
    full_mask = numpy.ones((n_steps, batch_size), dtype=floatX)
    matrix_value = compute_costs(sampled_targets, full_mask)[0]
    assert matrix_value.shape == (n_steps, batch_size)
    assert_allclose(matrix_value.sum(), 483.153, rtol=1e-5)

    # cost: scalar reduction of the matrix above.
    total_cost = generator.cost(y, mask)
    assert total_cost.ndim == 0
    total_value = theano.function([y, mask], total_cost)(
        sampled_targets, full_mask)
    assert_allclose(total_value, 16.105, rtol=1e-5)

    # AUXILIARY 'per_sequence_element' variable attached by cost.
    graph = ComputationGraph([total_cost])
    wanted_name = '_'.join([generator.name, generator.cost.name,
                            'per_sequence_element'])
    aux_vars = VariableFilter(roles=[AUXILIARY])(graph.variables)
    per_element = [var for var in aux_vars if var.name == wanted_name][0]
    assert per_element.ndim == 0
    per_element_value = theano.function([y, mask], [per_element])(
        sampled_targets, full_mask)
    assert_allclose(per_element_value, 1.61051, rtol=1e-5)

    # generate: sample n_steps symbols for a whole batch.
    states, outputs, lm_states, costs = generator.generate(
        iterate=True, batch_size=batch_size, n_steps=n_steps)
    sample_graph = ComputationGraph([states, outputs, costs])
    sampler = theano.function([], [states, outputs, costs],
                              updates=sample_graph.updates)
    states_value, outputs_value, costs_value = sampler()
    assert states_value.shape == (n_steps, batch_size, dim)
    assert outputs_value.shape == (n_steps, batch_size)
    assert outputs_value.dtype == 'int64'
    assert costs_value.shape == (n_steps, batch_size)
    assert_allclose(states_value.sum(), -4.88367, rtol=1e-5)
    assert_allclose(costs_value.sum(), 486.681, rtol=1e-5)
    assert outputs_value.sum() == 627

    # Masked-out trailing positions must not change the cost.
    unpadded = compute_costs([[1], [2]], [[1], [1]])[0]
    padded = compute_costs([[3, 1], [4, 2], [2, 0]],
                           [[1, 1], [1, 1], [1, 0]])[0]
    assert_allclose(unpadded.sum(), padded[:, 1].sum(), rtol=1e-5)
cost.name = "nll" cg = ComputationGraph(cost) model = Model(cost) transition_matrix = VariableFilter(theano_name_regex="state_to_state")( cg.parameters) for matr in transition_matrix: matr.set_value(0.98 * np.eye(hidden_size_recurrent, dtype=floatX)) readouts = VariableFilter(applications=[generator.readout.readout], name_regex="output")(cg.variables)[0] mean, sigma, corr, weight, penup = emitter.components(readouts) emit = generator.generate(n_steps=400, batch_size=8, iterate=True)[-2] #ipdb.set_trace() function([x, x_mask], cost)(x_tr[0], x_tr[1]) emit_fn = ComputationGraph(emit).get_theano_function() emit_fn() min_sigma = sigma.min(axis=(0, 2)).copy(name="sigma_min") mean_sigma = sigma.mean(axis=(0, 2)).copy(name="sigma_mean") max_sigma = sigma.max(axis=(0, 2)).copy(name="sigma_max") min_mean = mean.min(axis=(0, 2)).copy(name="mu_min") mean_mean = mean.mean(axis=(0, 2)).copy(name="mu_mean") max_mean = mean.max(axis=(0, 2)).copy(name="mu_max")
def test_attention_transition():
    """Shape-check AttentionTransition alone and inside a
    SequenceGenerator (values are not verified)."""
    inp_dim = 2
    inp_len = 10
    attended_dim = 3
    attended_len = 11
    batch_size = 4
    n_steps = 30

    # Assemble transition + content attention + mixer.
    transition = TestTransition(dim=inp_dim, attended_dim=attended_dim,
                                name="transition")
    attention = SequenceContentAttention(transition.apply.states,
                                         match_dim=inp_dim,
                                         name="attention")
    sequence_names = [name for name in transition.apply.sequences
                      if name != 'mask']
    mixer = Mixer(sequence_names, attention.take_look.outputs[0],
                  name="mixer")
    att_trans = AttentionTransition(transition, attention, mixer,
                                    name="att_trans")
    att_trans.weights_init = IsotropicGaussian(0.01)
    att_trans.biases_init = Constant(0)
    att_trans.initialize()

    attended = tensor.tensor3("attended")
    attended_mask = tensor.matrix("attended_mask")
    inputs = tensor.tensor3("inputs")
    inputs_mask = tensor.matrix("inputs_mask")
    states, glimpses, weights = att_trans.apply(
        input_=inputs, mask=inputs_mask, attended=attended,
        attended_mask=attended_mask)
    for variable in (states, glimpses, weights):
        assert variable.ndim == 3

    # All-zero inputs, all-ones masks: only shapes matter here.
    input_vals = numpy.zeros((inp_len, batch_size, inp_dim),
                             dtype=floatX)
    input_mask_vals = numpy.ones((inp_len, batch_size), dtype=floatX)
    attended_vals = numpy.zeros((attended_len, batch_size, attended_dim),
                                dtype=floatX)
    attended_mask_vals = numpy.ones((attended_len, batch_size),
                                    dtype=floatX)

    apply_fn = theano.function(
        [inputs, inputs_mask, attended, attended_mask],
        [states, glimpses, weights])
    states_vals, glimpses_vals, weight_vals = apply_fn(
        input_vals, input_mask_vals, attended_vals, attended_mask_vals)

    assert states_vals.shape == input_vals.shape
    assert glimpses_vals.shape == (inp_len, batch_size, attended_dim)
    assert weight_vals.shape == (inp_len, batch_size, attended_len)

    # Test SequenceGenerator using AttentionTransition
    # (note: readout reads the singular "state" source here).
    generator = SequenceGenerator(
        LinearReadout(readout_dim=inp_dim, source_names=["state"],
                      emitter=TestEmitter(name="emitter"),
                      name="readout"),
        transition=transition,
        attention=attention,
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
        name="generator")

    outputs = tensor.tensor3('outputs')
    costs = generator.cost(outputs, attended=attended,
                           attended_mask=attended_mask)
    costs_vals = costs.eval({outputs: input_vals,
                             attended: attended_vals,
                             attended_mask: attended_mask_vals})
    assert costs_vals.shape == (inp_len, batch_size)

    results = generator.generate(n_steps=n_steps,
                                 batch_size=attended.shape[1],
                                 attended=attended,
                                 attended_mask=attended_mask)
    assert len(results) == 5
    generate_fn = theano.function([attended, attended_mask], results)
    (states_vals, outputs_vals, glimpses_vals,
     weights_vals, costs_vals) = generate_fn(attended_vals,
                                             attended_mask_vals)
    assert states_vals.shape == (n_steps, batch_size, inp_dim)
    assert states_vals.shape == outputs_vals.shape
    assert glimpses_vals.shape == (n_steps, batch_size, attended_dim)
    assert weights_vals.shape == (n_steps, batch_size, attended_len)
    assert costs_vals.shape == (n_steps, batch_size)
class Decoder(Initializable):
    """Attention-based NMT decoder: a GRU transition with content
    attention over the encoder representation, a softmax readout with
    maxout post-merge, and lookup feedback — wired into a
    SequenceGenerator."""

    def __init__(self, vocab_size, embedding_dim, state_dim,
                 representation_dim, **kwargs):
        """Build the decoder bricks.

        Args:
            vocab_size: Target vocabulary size (readout dimension).
            embedding_dim: Size of the target word embeddings.
            state_dim: Number of recurrent hidden units.
            representation_dim: Dimension of the encoder annotations.
        """
        super(Decoder, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.state_dim = state_dim
        self.representation_dim = representation_dim

        # GRU whose initial state is computed from the attended input.
        self.transition = GRUInitialState(
            attended_dim=state_dim, dim=state_dim,
            activation=Tanh(), name='decoder')

        self.attention = SequenceContentAttention(
            state_names=self.transition.apply.states,
            attended_dim=representation_dim,
            match_dim=state_dim, name="attention")

        # SoftmaxEmitter emits -1 as the initial output, which
        # LookupFeedbackWMT15 treats specially.
        # NOTE(review): `state_dim / 2` assumes Python 2 integer division
        # (Maxout with num_pieces=2 halves the dimension); under Python 3
        # this would be a float — confirm.
        readout = Readout(
            source_names=['states', 'feedback',
                          self.attention.take_glimpses.outputs[0]],
            readout_dim=self.vocab_size,
            emitter=SoftmaxEmitter(initial_output=-1),
            feedback_brick=LookupFeedbackWMT15(vocab_size, embedding_dim),
            post_merge=InitializableFeedforwardSequence(
                [Bias(dim=state_dim, name='maxout_bias').apply,
                 Maxout(num_pieces=2, name='maxout').apply,
                 Linear(input_dim=state_dim / 2, output_dim=embedding_dim,
                        use_bias=False, name='softmax0').apply,
                 Linear(input_dim=embedding_dim, name='softmax1').apply]),
            merged_dim=state_dim)

        # Fork feeds every transition input sequence except the mask.
        self.sequence_generator = SequenceGenerator(
            readout=readout,
            transition=self.transition,
            attention=self.attention,
            fork=Fork([name for name in self.transition.apply.sequences
                       if name != 'mask'], prototype=Linear())
        )

        self.children = [self.sequence_generator]

    @application(inputs=['representation', 'source_sentence_mask',
                         'target_sentence_mask', 'target_sentence'],
                 outputs=['cost'])
    def cost(self, representation, source_sentence_mask,
             target_sentence, target_sentence_mask):
        """Return the masked training cost averaged over the batch.

        Inputs arrive batch-major and are transposed to the time-major
        layout expected by ``SequenceGenerator.cost_matrix``.
        """
        source_sentence_mask = source_sentence_mask.T
        target_sentence = target_sentence.T
        target_sentence_mask = target_sentence_mask.T

        # Get the cost matrix
        cost = self.sequence_generator.cost_matrix(
            **{'mask': target_sentence_mask,
               'outputs': target_sentence,
               'attended': representation,
               'attended_mask': source_sentence_mask}
        )

        # Sum over time, average over the batch (shape[1] after the
        # transpose above is the batch size).
        return (cost * target_sentence_mask).sum() / \
            target_sentence_mask.shape[1]

    @application
    def generate(self, source_sentence, representation):
        """Decode from ``representation``, allowing up to twice the
        source sentence length in output steps."""
        return self.sequence_generator.generate(
            n_steps=2 * source_sentence.shape[1],
            batch_size=source_sentence.shape[0],
            attended=representation,
            attended_mask=tensor.ones(source_sentence.shape).T)
def main():
    """Char-level RNN language-model case study (Groundhog-based).

    ``train`` fits a model whose hidden state is carried across batches
    via a shared variable; ``sample`` prints sequences from a saved one.
    """
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

    parser = argparse.ArgumentParser(
        "Case study of language modeling with RNN",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "mode", choices=["train", "sample"],
        help="The mode to run. Use `train` to train a new model"
             " and `sample` to sample a sequence generated by an"
             " existing one.")
    parser.add_argument(
        "prefix", default="sine",
        help="The prefix for model, timing and state files")
    parser.add_argument(
        "state", nargs="?", default="",
        help="Changes to Groundhog state")
    parser.add_argument("--path", help="Path to a language dataset")
    parser.add_argument("--dict", help="Path to the dataset dictionary")
    parser.add_argument("--restart", help="Start anew")
    parser.add_argument(
        "--reset", action="store_true", default=False,
        help="Reset the hidden state between batches")
    parser.add_argument(
        "--steps", type=int, default=100,
        help="Number of steps to plot for the 'sample' mode"
             " OR training sequence length for the 'train' mode.")
    args = parser.parse_args()
    logger.debug("Args:\n" + str(args))

    dim = 200
    num_chars = 50

    transition = GatedRecurrent(
        name="transition", activation=Tanh(), dim=dim,
        weights_init=Orthogonal())
    generator = SequenceGenerator(
        LinearReadout(readout_dim=num_chars, source_names=["states"],
                      emitter=SoftmaxEmitter(name="emitter"),
                      feedbacker=LookupFeedback(
                          num_chars, dim, name='feedback'),
                      name="readout"),
        transition,
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
        name="generator")
    generator.allocate()
    logger.debug("Parameters:\n" + pprint.pformat(
        [(key, value.get_value().shape) for key, value
         in Selector(generator).get_params().items()],
        width=120))

    if args.mode == "train":
        batch_size = 1
        seq_len = args.steps

        generator.initialize()

        # Build cost computation graph that uses the saved hidden states.
        # An issue: for Groundhog this is completely transparent, that's
        # why it does not carry the hidden state over the period when
        # validation in done. We should find a way to fix in the future.
        x = tensor.lmatrix('x')
        # Multiplying by `reset` zeroes the carried state when requested.
        init_states = shared_floatx_zeros((batch_size, dim),
                                          name='init_states')
        reset = tensor.scalar('reset')
        cost = ComputationGraph(
            generator.cost(x, states=init_states * reset).sum())

        # TODO: better search routine
        # Locate the transition's 'states' output variable in the graph so
        # its last value can be fed back as the next initial state.
        states = [v for v in cost.variables
                  if hasattr(v.tag, 'application_call')
                  and v.tag.application_call.brick == generator.transition
                  and (v.tag.application_call.application ==
                       generator.transition.apply)
                  and v.tag.role == VariableRole.OUTPUT
                  and v.tag.name == 'states']
        assert len(states) == 1
        states = states[0]

        gh_model = GroundhogModel(generator, cost)
        # Bits-per-character is the cost rescaled to log base 2.
        gh_model.properties.append(
            ('bpc', cost.outputs[0] * numpy.log(2) / seq_len))
        gh_model.properties.append(('mean_init_state', init_states.mean()))
        gh_model.properties.append(('reset', reset))
        if not args.reset:
            # Carry the hidden state over to the next batch.
            gh_model.updates.append((init_states, states[-1]))

        state = GroundhogState(args.prefix, batch_size,
                               learning_rate=0.0001).as_dict()
        # NOTE(review): eval of the user-supplied `state` string — fine
        # for a research script, unsafe on untrusted input.
        changes = eval("dict({})".format(args.state))
        state.update(changes)

        def output_format(x, y, reset):
            return dict(x=x[:, None], reset=reset)
        train, valid, test = [
            LMIterator(batch_size=batch_size,
                       use_infinite_loop=mode == 'train',
                       path=args.path,
                       seq_len=seq_len,
                       mode=mode,
                       chunks='chars',
                       output_format=output_format,
                       can_fit=True)
            for mode in ['train', 'valid', 'test']]

        trainer = SGD(gh_model, state, train)
        state['on_nan'] = 'warn'
        state['cutoff'] = 1.

        main_loop = MainLoop(train, valid, None, gh_model, trainer,
                             state, None)
        if not args.restart:
            main_loop.load()
        main_loop.main()
    elif args.mode == "sample":
        load_params(generator, args.prefix + "model.npz")

        chars = numpy.load(args.dict)['unique_chars']

        sample = ComputationGraph(generator.generate(
            n_steps=args.steps, batch_size=10, iterate=True)).function()
        states, outputs, costs = sample()

        for i in range(10):
            print("Generation cost: {}".format(costs[:, i].sum()))
            print("".join([chars[o] for o in outputs[:, i]]))
    else:
        assert False
# NOTE(review): fragment — the enclosing main() and its
# MainLoop(..., extensions=[ ... ]) call begin outside this chunk; the
# statements below are reformatted only, code unchanged (Python 2 print).
        TrainingDataMonitoring([cost], prefix="this_step",
                               after_batch=True),
        TrainingDataMonitoring([cost], prefix="average",
                               every_n_batches=100),
        Checkpoint(args.model, every_n_epochs=5),
        Printing(every_n_batches=100)
    ])
    main_loop.run()
elif args.mode == "retrain":
    main_loop = load(open(args.model, "rb"))
    main_loop.run()
elif args.mode == "sample":
    main_loop = load(open(args.model, "rb"))
    # get the one and only brick in the computation graph
    generator = main_loop.model.get_top_bricks()[0]
    sample = ComputationGraph(
        generator.generate(n_steps=args.sample_size, batch_size=1,
                           iterate=True)).get_theano_function()
    states, outputs, costs = [data[:, 0] for data in sample()]
    print "".join(corpus.decode(outputs))
else:
    assert False
def test_sequence_generator():
    """Check a context-free generator with continuous outputs.

    Covers `cost_matrix`, `cost`, the auxiliary per-sequence-element
    cost, and `generate`, against fixed reference values. Such sequence
    generators can be used to model e.g. dynamical systems.
    """
    prng = numpy.random.RandomState(1234)

    output_dim = 1
    dim = 20
    batch_size = 30
    n_steps = 10

    transition = SimpleRecurrent(activation=Tanh(), dim=dim,
                                 weights_init=Orthogonal())
    generator = SequenceGenerator(
        Readout(readout_dim=output_dim, source_names=["states"],
                emitter=TestEmitter()),
        transition,
        weights_init=IsotropicGaussian(0.1), biases_init=Constant(0.0),
        seed=1234)
    generator.initialize()

    # 'cost_matrix' yields one cost per (step, sequence) pair.
    y = tensor.tensor3('y')
    mask = tensor.matrix('mask')
    costs = generator.cost_matrix(y, mask)
    assert costs.ndim == 2

    y_test = prng.uniform(
        size=(n_steps, batch_size, output_dim)).astype(floatX)
    m_test = numpy.ones((n_steps, batch_size), dtype=floatX)
    costs_val = theano.function([y, mask], [costs])(y_test, m_test)[0]
    assert costs_val.shape == (n_steps, batch_size)
    assert_allclose(costs_val.sum(), 115.593, rtol=1e-5)

    # 'cost' aggregates the matrix into a scalar.
    cost = generator.cost(y, mask)
    assert cost.ndim == 0
    cost_val = theano.function([y, mask], [cost])(y_test, m_test)
    assert_allclose(cost_val, 3.8531, rtol=1e-5)

    # An AUXILIARY 'per_sequence_element' cost is attached to the graph.
    cg = ComputationGraph([cost])
    var_filter = VariableFilter(roles=[AUXILIARY])
    aux_var_name = '_'.join([generator.name, generator.cost.name,
                             'per_sequence_element'])
    per_element = next(el for el in var_filter(cg.variables)
                       if el.name == aux_var_name)
    assert per_element.ndim == 0
    per_element_val = theano.function(
        [y, mask], [per_element])(y_test, m_test)
    assert_allclose(per_element_val, 0.38531, rtol=1e-5)

    # 'generate' unrolls the network for n_steps from random states.
    init_states = prng.uniform(size=(batch_size, dim)).astype(floatX)
    generated = generator.generate(states=init_states, iterate=True,
                                   batch_size=batch_size, n_steps=n_steps)
    states, outputs, costs = (variable.eval() for variable in generated)
    assert states.shape == (n_steps, batch_size, dim)
    assert outputs.shape == (n_steps, batch_size, output_dim)
    assert costs.shape == (n_steps, batch_size)
    assert_allclose(outputs.sum(), -0.33683, rtol=1e-5)
    assert_allclose(states.sum(), 15.7909, rtol=1e-5)
    # Deterministic emission implies zero generation cost.
    assert_allclose(costs.sum(), 0.0)
class PyramidLayer(Initializable):
    """Basic unit for the pyramid model.

    Wires an MLP feedback, a stack of GRUs, a GMM emitter and a simple
    sequence attention into a ``SequenceGenerator`` conditioned on a
    context processed by ``mlp_context``.
    """

    def __init__(self, batch_size, frame_size, k, depth, size, **kwargs):
        # frame_size: size of one output frame; k: number of GMM
        # components; depth/size: scale the MLP and recurrent sizes.
        super(PyramidLayer, self).__init__(**kwargs)

        target_size = frame_size * k

        depth_x = depth
        hidden_size_mlp_x = 32 * size
        depth_transition = depth - 1
        depth_theta = depth
        hidden_size_mlp_theta = 32 * size
        hidden_size_recurrent = 32 * size * 3
        depth_context = depth
        hidden_size_mlp_context = 32 * size
        context_size = 32 * size

        activations_x = [Rectifier()] * depth_x
        # Last layer is 4x the recurrent size (gates + candidate inputs).
        dims_x = ([frame_size] + [hidden_size_mlp_x] * (depth_x - 1) +
                  [4 * hidden_size_recurrent])

        activations_theta = [Rectifier()] * depth_theta
        dims_theta = ([hidden_size_recurrent] +
                      [hidden_size_mlp_theta] * depth_theta)

        activations_context = [Rectifier()] * depth_context
        dims_context = ([frame_size] +
                        [hidden_size_mlp_context] * (depth_context - 1) +
                        [context_size])

        mlp_x = MLP(activations=activations_x, dims=dims_x, name="mlp_x")
        feedback = DeepTransitionFeedback(mlp=mlp_x)

        transition = [GatedRecurrent(dim=hidden_size_recurrent,
                                     use_bias=True,
                                     name="gru_{}".format(i))
                      for i in range(depth_transition)]
        transition = RecurrentStack(transition, name="transition",
                                    skip_connections=True)
        self.transition = transition

        mlp_theta = MLP(activations=activations_theta, dims=dims_theta,
                        name="mlp_theta")
        mlp_gmm = GMMMLP(mlp=mlp_theta, dim=target_size, k=k,
                         const=0.00001, name="gmm_wrap")
        gmm_emitter = GMMEmitter(gmmmlp=mlp_gmm, output_size=frame_size,
                                 k=k)

        # One 'states' source per layer of the recurrent stack.
        source_names = [name for name in transition.apply.states
                        if 'states' in name]
        attention = SimpleSequenceAttention(
            state_names=source_names,
            state_dims=[hidden_size_recurrent],
            attended_dim=context_size, name="attention")

        readout = Readout(
            readout_dim=hidden_size_recurrent,
            source_names=source_names + ['feedback'] + ['glimpses'],
            emitter=gmm_emitter,
            feedback_brick=feedback,
            name="readout")

        self.generator = SequenceGenerator(readout=readout,
                                           transition=transition,
                                           attention=attention,
                                           name="generator")
        self.mlp_context = MLP(activations=activations_context,
                               dims=dims_context)
        self.children = [self.generator, self.mlp_context]
        self.final_states = []

    def monitoring_vars(self, cg):
        """Return min/mean/max statistics of the GMM mu and sigma."""
        readout = self.generator.readout
        readouts = VariableFilter(
            applications=[readout.readout],
            name_regex="output")(cg.variables)[0]
        mu, sigma, coeff = readout.emitter.components(readouts)

        min_sigma = sigma.min().copy(name="sigma_min")
        mean_sigma = sigma.mean().copy(name="sigma_mean")
        max_sigma = sigma.max().copy(name="sigma_max")
        min_mu = mu.min().copy(name="mu_min")
        mean_mu = mu.mean().copy(name="mu_mean")
        max_mu = mu.max().copy(name="mu_max")
        monitoring_vars = [mean_sigma, min_sigma, min_mu, max_mu,
                           mean_mu, max_sigma]
        return monitoring_vars

    @application
    def cost(self, x, context, **kwargs):
        """Mean generation cost of x given the processed context."""
        cost_matrix = self.generator.cost_matrix(
            x, attended=self.mlp_context.apply(context), **kwargs)
        return cost_matrix.mean()

    @application
    def generate(self, context):
        """Generate a sequence conditioned on the context.

        BUG FIX: the original signature was ``generate(context)`` — the
        ``self`` parameter was missing, so ``self.generator`` raised a
        NameError when the method was invoked.
        """
        return self.generator.generate(
            attended=self.mlp_context.apply(context),
            n_steps=context.shape[0],
            batch_size=context.shape[1],
            iterate=True)
class Decoder(Initializable):
    """Decoder of RNNsearch model."""

    def __init__(self, vocab_size, embedding_dim, state_dim,
                 representation_dim, theano_seed=None, **kwargs):
        # theano_seed is forwarded to the SoftmaxEmitter for sampling.
        super(Decoder, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.state_dim = state_dim
        self.representation_dim = representation_dim
        self.theano_seed = theano_seed

        # Initialize gru with special initial state
        self.transition = GRUInitialState(attended_dim=state_dim,
                                          dim=state_dim,
                                          activation=Tanh(),
                                          name='decoder')

        # Initialize the attention mechanism
        self.attention = SequenceContentAttention(
            state_names=self.transition.apply.states,
            attended_dim=representation_dim,
            match_dim=state_dim,
            name="attention")

        # Initialize the readout, note that SoftmaxEmitter emits -1 for
        # initial outputs which is used by LookupFeedBackWMT15
        readout = Readout(
            source_names=[
                'states', 'feedback',
                self.attention.take_glimpses.outputs[0]
            ],
            readout_dim=self.vocab_size,
            emitter=SoftmaxEmitter(initial_output=-1,
                                   theano_seed=theano_seed),
            feedback_brick=LookupFeedbackWMT15(vocab_size, embedding_dim),
            # Post-merge: bias -> maxout -> linear -> linear, producing
            # embedding_dim features for the softmax.
            post_merge=InitializableFeedforwardSequence([
                Bias(dim=state_dim, name='maxout_bias').apply,
                Maxout(num_pieces=2, name='maxout').apply,
                Linear(input_dim=state_dim / 2, output_dim=embedding_dim,
                       use_bias=False, name='softmax0').apply,
                Linear(input_dim=embedding_dim, name='softmax1').apply
            ]),
            merged_dim=state_dim)

        # Build sequence generator accordingly
        # (fork every transition input sequence except the mask).
        self.sequence_generator = SequenceGenerator(
            readout=readout,
            transition=self.transition,
            attention=self.attention,
            fork=Fork([
                name for name in self.transition.apply.sequences
                if name != 'mask'
            ], prototype=Linear()))

        self.children = [self.sequence_generator]

    @application(inputs=[
        'representation', 'source_sentence_mask', 'target_sentence_mask',
        'target_sentence'
    ], outputs=['cost'])
    def cost(self, representation, source_sentence_mask, target_sentence,
             target_sentence_mask):
        """Return the masked generation cost, normalized per sequence."""
        # Transpose to time-major layout expected by the generator
        # (presumably inputs arrive batch-major — confirm with caller).
        source_sentence_mask = source_sentence_mask.T
        target_sentence = target_sentence.T
        target_sentence_mask = target_sentence_mask.T

        # Get the cost matrix
        cost = self.sequence_generator.cost_matrix(
            **{
                'mask': target_sentence_mask,
                'outputs': target_sentence,
                'attended': representation,
                'attended_mask': source_sentence_mask
            })

        # Sum over all elements, divide by mask dim 1 (batch size after
        # the transpose above).
        return (cost * target_sentence_mask).sum() / \
            target_sentence_mask.shape[1]

    @application
    def generate(self, source_sentence, representation, **kwargs):
        """Sample outputs for up to twice the source length."""
        return self.sequence_generator.generate(
            n_steps=2 * source_sentence.shape[1],
            batch_size=source_sentence.shape[0],
            attended=representation,
            attended_mask=tensor.ones(source_sentence.shape).T,
            **kwargs)
def main():
    """Markov-chain case study (Groundhog-based).

    ``train`` fits an RNN generator on chain samples; ``sample`` draws a
    sequence and compares its empirical statistics to the true chain.
    """
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

    parser = argparse.ArgumentParser(
        "Case study of generating a Markov chain with RNN.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "mode", choices=["train", "sample"],
        help="The mode to run. Use `train` to train a new model"
             " and `sample` to sample a sequence generated by an"
             " existing one.")
    parser.add_argument(
        "prefix", default="sine",
        help="The prefix for model, timing and state files")
    parser.add_argument(
        "--steps", type=int, default=100,
        help="Number of steps to plot")
    args = parser.parse_args()

    dim = 10
    num_states = ChainIterator.num_states
    feedback_dim = 8

    transition = GatedRecurrent(name="transition", activation=Tanh(),
                                dim=dim)
    generator = SequenceGenerator(
        LinearReadout(readout_dim=num_states, source_names=["states"],
                      emitter=SoftmaxEmitter(name="emitter"),
                      feedbacker=LookupFeedback(
                          num_states, feedback_dim, name='feedback'),
                      name="readout"),
        transition,
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
        name="generator")
    generator.allocate()
    logger.debug("Parameters:\n" + pprint.pformat(
        [(key, value.get_value().shape) for key, value
         in Selector(generator).get_params().items()],
        width=120))

    if args.mode == "train":
        rng = numpy.random.RandomState(1)
        batch_size = 50

        # Override only the transition init, then initialize the rest.
        generator.push_initialization_config()
        transition.weights_init = Orthogonal()
        generator.initialize()
        logger.debug("transition.weights_init={}".format(
            transition.weights_init))

        cost = generator.cost(tensor.lmatrix('x')).sum()
        gh_model = GroundhogModel(generator, cost)
        state = GroundhogState(args.prefix, batch_size,
                               learning_rate=0.0001).as_dict()
        data = ChainIterator(rng, 100, batch_size)
        trainer = SGD(gh_model, state, data)
        main_loop = MainLoop(data, None, None, gh_model, trainer, state,
                             None)
        main_loop.main()
    elif args.mode == "sample":
        load_params(generator, args.prefix + "model.npz")

        sample = ComputationGraph(generator.generate(
            n_steps=args.steps, batch_size=1, iterate=True)).function()
        states, outputs, costs = [data[:, 0] for data in sample()]

        numpy.set_printoptions(precision=3, suppress=True)
        print("Generation cost:\n{}".format(costs.sum()))

        # Empirical state frequencies vs. the chain's equilibrium.
        freqs = numpy.bincount(outputs).astype(floatX)
        freqs /= freqs.sum()
        print("Frequencies:\n {} vs {}".format(freqs,
                                               ChainIterator.equilibrium))

        # Empirical transition frequencies vs. the true matrix.
        trans_freqs = numpy.zeros((num_states, num_states), dtype=floatX)
        for a, b in zip(outputs, outputs[1:]):
            trans_freqs[a, b] += 1
        trans_freqs /= trans_freqs.sum(axis=1)[:, None]
        print("Transition frequencies:\n{}\nvs\n{}".format(
            trans_freqs, ChainIterator.trans_prob))
    else:
        assert False
def main():
    """1d-sequence case study: learn a parametric family of sequences.

    The transition is conditioned on extra parameters via AddParameters;
    ``train`` fits the model, ``plot`` compares a generated sequence with
    the analytical one.
    """
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

    parser = argparse.ArgumentParser(
        "Case study of generating simple 1d sequences with RNN.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "mode", choices=["train", "plot"],
        help="The mode to run. Use `train` to train a new model"
             " and `plot` to plot a sequence generated by an"
             " existing one.")
    parser.add_argument(
        "prefix", default="sine",
        help="The prefix for model, timing and state files")
    parser.add_argument(
        "--input-noise", type=float, default=0.0,
        help="Adds Gaussian noise of given intensity to the "
             " training sequences.")
    parser.add_argument(
        "--function", default="lambda a, x: numpy.sin(a * x)",
        help="An analytical description of the sequence family to learn."
             " The arguments before the last one are considered parameters.")
    parser.add_argument(
        "--steps", type=int, default=100,
        help="Number of steps to plot")
    parser.add_argument(
        "--params",
        help="Parameter values for plotting")
    args = parser.parse_args()

    # NOTE(review): eval of a user-supplied expression — acceptable for a
    # research script, unsafe on untrusted input.
    function = eval(args.function)
    num_params = len(inspect.getargspec(function).args) - 1

    class Emitter(TrivialEmitter):
        @application
        def cost(self, readouts, outputs):
            """Compute MSE."""
            return ((readouts - outputs) ** 2).sum(axis=readouts.ndim - 1)

    transition = GatedRecurrent(
        name="transition", activation=Tanh(), dim=10,
        weights_init=Orthogonal())
    with_params = AddParameters(transition, num_params, "params",
                                name="with_params")
    generator = SequenceGenerator(
        LinearReadout(readout_dim=1, source_names=["states"],
                      emitter=Emitter(name="emitter"),
                      name="readout"),
        with_params,
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
        name="generator")
    generator.allocate()
    logger.debug("Parameters:\n" + pprint.pformat(
        [(key, value.get_value().shape) for key, value
         in Selector(generator).get_params().items()],
        width=120))

    if args.mode == "train":
        seed = 1
        rng = numpy.random.RandomState(seed)
        batch_size = 10

        generator.initialize()

        cost = ComputationGraph(
            generator.cost(tensor.tensor3('x'),
                           params=tensor.matrix("params")).sum())
        # Optionally corrupt the inputs with Gaussian noise.
        cost = apply_noise(cost, cost.inputs, args.input_noise)

        gh_model = GroundhogModel(generator, cost)
        state = GroundhogState(args.prefix, batch_size,
                               learning_rate=0.0001).as_dict()
        data = SeriesIterator(rng, function, 100, batch_size)
        trainer = SGD(gh_model, state, data)
        main_loop = MainLoop(data, None, None, gh_model,
                             trainer, state, None)
        main_loop.load()
        main_loop.main()
    elif args.mode == "plot":
        load_params(generator, args.prefix + "model.npz")

        params = tensor.matrix("params")
        sample = theano.function([params], generator.generate(
            params=params, n_steps=args.steps, batch_size=1))
        # NOTE(review): relies on Python 2 `map` returning a list;
        # under Python 3 this needs list(map(...)) — confirm target.
        param_values = numpy.array(map(float, args.params.split()),
                                   dtype=floatX)
        states, outputs, _ = sample(param_values[None, :])
        actual = outputs[:, 0, 0]
        desired = numpy.array([function(*(list(param_values) + [T]))
                               for T in range(args.steps)])
        print("MSE: {}".format(((actual - desired) ** 2).sum()))

        pyplot.plot(numpy.hstack([actual[:, None], desired[:, None]]))
        pyplot.show()
    else:
        assert False
def test_with_attention():
    """Test a sequence generator with continuous outputs and attention."""
    rng = numpy.random.RandomState(1234)

    inp_dim = 2
    inp_len = 10
    attended_dim = 3
    attended_len = 11
    batch_size = 4
    n_steps = 30

    # For values
    def rand(size):
        return rng.uniform(size=size).astype(floatX)

    # For masks: ones up to a random per-sequence cutoff, zeros after.
    def generate_mask(length, batch_size):
        mask = numpy.ones((length, batch_size), dtype=floatX)
        # To make it look like read data
        for i in range(batch_size):
            mask[1 + rng.randint(0, length - 1):, i] = 0.0
        return mask

    output_vals = rand((inp_len, batch_size, inp_dim))
    output_mask_vals = generate_mask(inp_len, batch_size)
    attended_vals = rand((attended_len, batch_size, attended_dim))
    attended_mask_vals = generate_mask(attended_len, batch_size)

    transition = TestTransition(
        dim=inp_dim, attended_dim=attended_dim, activation=Identity())
    attention = SequenceContentAttention(
        state_names=transition.apply.states, match_dim=inp_dim)
    generator = SequenceGenerator(
        Readout(
            readout_dim=inp_dim,
            source_names=[transition.apply.states[0],
                          attention.take_glimpses.outputs[0]],
            emitter=TestEmitter()),
        transition=transition,
        attention=attention,
        weights_init=IsotropicGaussian(0.1), biases_init=Constant(0),
        add_contexts=False, seed=1234)
    generator.initialize()

    # Test 'cost_matrix' method
    attended = tensor.tensor3("attended")
    attended_mask = tensor.matrix("attended_mask")
    outputs = tensor.tensor3('outputs')
    mask = tensor.matrix('mask')
    costs = generator.cost_matrix(outputs, mask,
                                  attended=attended,
                                  attended_mask=attended_mask)
    costs_vals = costs.eval({outputs: output_vals,
                             mask: output_mask_vals,
                             attended: attended_vals,
                             attended_mask: attended_mask_vals})
    assert costs_vals.shape == (inp_len, batch_size)
    assert_allclose(costs_vals.sum(), 13.5042, rtol=1e-5)

    # Test `generate` method: returns 5 recurrent outputs — states,
    # outputs, glimpses, attention weights and costs.
    results = (
        generator.generate(n_steps=n_steps, batch_size=attended.shape[1],
                           attended=attended,
                           attended_mask=attended_mask))
    assert len(results) == 5
    states_vals, outputs_vals, glimpses_vals, weights_vals, costs_vals = (
        theano.function([attended, attended_mask], results)
        (attended_vals, attended_mask_vals))
    assert states_vals.shape == (n_steps, batch_size, inp_dim)
    assert states_vals.shape == outputs_vals.shape
    assert glimpses_vals.shape == (n_steps, batch_size, attended_dim)
    assert weights_vals.shape == (n_steps, batch_size, attended_len)
    assert costs_vals.shape == (n_steps, batch_size)
    assert_allclose(states_vals.sum(), 23.4172, rtol=1e-5)
    # There is no generation cost in this case, since generation is
    # deterministic
    assert_allclose(costs_vals.sum(), 0.0, rtol=1e-5)
    assert_allclose(weights_vals.sum(), 120.0, rtol=1e-5)
    assert_allclose(glimpses_vals.sum(), 199.2402, rtol=1e-5)
    assert_allclose(outputs_vals.sum(), -11.6008, rtol=1e-5)
def main(mode, save_path, steps, num_batches):
    """Markov-chain case study using the Blocks MainLoop.

    mode: "train" fits a generator and checkpoints it to save_path;
    "sample" unpickles a checkpoint and draws `steps` outputs.
    num_batches: training length in batches.
    """
    num_states = MarkovChainDataset.num_states

    if mode == "train":
        # Experiment configuration
        rng = numpy.random.RandomState(1)
        batch_size = 50
        seq_len = 100
        dim = 10
        feedback_dim = 8

        # Build the bricks and initialize them
        transition = GatedRecurrent(name="transition", dim=dim,
                                    activation=Tanh())
        generator = SequenceGenerator(
            Readout(readout_dim=num_states, source_names=["states"],
                    emitter=SoftmaxEmitter(name="emitter"),
                    feedback_brick=LookupFeedback(
                        num_states, feedback_dim, name='feedback'),
                    name="readout"),
            transition,
            weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
            name="generator")
        generator.push_initialization_config()
        transition.weights_init = Orthogonal()
        generator.initialize()

        # Give an idea of what's going on.
        logger.info("Parameters:\n" + pprint.pformat(
            [(key, value.get_value().shape) for key, value
             in Selector(generator).get_params().items()],
            width=120))
        logger.info("Markov chain entropy: {}".format(
            MarkovChainDataset.entropy))
        logger.info("Expected min error: {}".format(
            -MarkovChainDataset.entropy * seq_len))

        # Build the cost computation graph.
        x = tensor.lmatrix('data')
        cost = aggregation.mean(generator.cost_matrix(x[:, :]).sum(),
                                x.shape[1])
        cost.name = "sequence_log_likelihood"

        algorithm = GradientDescent(
            cost=cost,
            params=list(Selector(generator).get_params().values()),
            step_rule=Scale(0.001))
        main_loop = MainLoop(
            algorithm=algorithm,
            data_stream=DataStream(
                MarkovChainDataset(rng, seq_len),
                iteration_scheme=ConstantScheme(batch_size)),
            model=Model(cost),
            extensions=[
                FinishAfter(after_n_batches=num_batches),
                TrainingDataMonitoring([cost], prefix="this_step",
                                       after_batch=True),
                TrainingDataMonitoring([cost], prefix="average",
                                       every_n_batches=100),
                Checkpoint(save_path, every_n_batches=500),
                Printing(every_n_batches=100)
            ])
        main_loop.run()
    elif mode == "sample":
        # NOTE(review): unpickling an arbitrary checkpoint file — unsafe
        # for untrusted input.
        main_loop = cPickle.load(open(save_path, "rb"))
        generator = main_loop.model

        sample = ComputationGraph(generator.generate(
            n_steps=steps, batch_size=1,
            iterate=True)).get_theano_function()

        states, outputs, costs = [data[:, 0] for data in sample()]

        numpy.set_printoptions(precision=3, suppress=True)
        print("Generation cost:\n{}".format(costs.sum()))

        # Empirical state frequencies vs. the chain's equilibrium.
        freqs = numpy.bincount(outputs).astype(floatX)
        freqs /= freqs.sum()
        print("Frequencies:\n {} vs {}".format(
            freqs, MarkovChainDataset.equilibrium))

        # Empirical transition frequencies vs. the true matrix.
        trans_freqs = numpy.zeros((num_states, num_states), dtype=floatX)
        for a, b in zip(outputs, outputs[1:]):
            trans_freqs[a, b] += 1
        trans_freqs /= trans_freqs.sum(axis=1)[:, None]
        print("Transition frequencies:\n{}\nvs\n{}".format(
            trans_freqs, MarkovChainDataset.trans_prob))
    else:
        assert False
def test_integer_sequence_generator():
    """Test a sequence generator with integer outputs.

    Such sequence generators can be used to e.g. model language.
    """
    rng = numpy.random.RandomState(1234)

    readout_dim = 5
    feedback_dim = 3
    dim = 20
    batch_size = 30
    n_steps = 10

    transition = GatedRecurrent(dim=dim, activation=Tanh(),
                                weights_init=Orthogonal())
    generator = SequenceGenerator(
        Readout(readout_dim=readout_dim, source_names=["states"],
                emitter=SoftmaxEmitter(theano_seed=1234),
                feedback_brick=LookupFeedback(readout_dim,
                                              feedback_dim)),
        transition,
        weights_init=IsotropicGaussian(0.1), biases_init=Constant(0),
        seed=1234)
    generator.initialize()

    # Test 'cost_matrix' method: one cost per (step, sequence) pair.
    y = tensor.lmatrix('y')
    mask = tensor.matrix('mask')
    costs = generator.cost_matrix(y, mask)
    assert costs.ndim == 2
    costs_fun = theano.function([y, mask], [costs])
    y_test = rng.randint(readout_dim, size=(n_steps, batch_size))
    m_test = numpy.ones((n_steps, batch_size), dtype=floatX)
    costs_val = costs_fun(y_test, m_test)[0]
    assert costs_val.shape == (n_steps, batch_size)
    assert_allclose(costs_val.sum(), 482.827, rtol=1e-5)

    # Test 'cost' method: scalar aggregate of the matrix.
    cost = generator.cost(y, mask)
    assert cost.ndim == 0
    cost_val = theano.function([y, mask], [cost])(y_test, m_test)
    assert_allclose(cost_val, 16.0942, rtol=1e-5)

    # Test 'AUXILIARY' variable 'per_sequence_element' in 'cost' method
    cg = ComputationGraph([cost])
    var_filter = VariableFilter(roles=[AUXILIARY])
    aux_var_name = '_'.join([generator.name, generator.cost.name,
                             'per_sequence_element'])
    cost_per_el = [el for el in var_filter(cg.variables)
                   if el.name == aux_var_name][0]
    assert cost_per_el.ndim == 0
    cost_per_el_val = theano.function([y, mask],
                                      [cost_per_el])(y_test, m_test)
    assert_allclose(cost_per_el_val, 1.60942, rtol=1e-5)

    # Test generate (sampling updates the emitter's RNG state, hence
    # the explicit `updates=cg.updates`).
    states, outputs, costs = generator.generate(
        iterate=True, batch_size=batch_size, n_steps=n_steps)
    cg = ComputationGraph(states + outputs + costs)
    states_val, outputs_val, costs_val = theano.function(
        [], [states, outputs, costs],
        updates=cg.updates)()
    assert states_val.shape == (n_steps, batch_size, dim)
    assert outputs_val.shape == (n_steps, batch_size)
    assert outputs_val.dtype == 'int64'
    assert costs_val.shape == (n_steps, batch_size)
    assert_allclose(states_val.sum(), -17.91811, rtol=1e-5)
    assert_allclose(costs_val.sum(), 482.863, rtol=1e-5)
    assert outputs_val.sum() == 630

    # Test masks agnostic results of cost
    cost1 = costs_fun([[1], [2]], [[1], [1]])[0]
    cost2 = costs_fun([[3, 1], [4, 2], [2, 0]],
                      [[1, 1], [1, 1], [1, 0]])[0]
    assert_allclose(cost1.sum(), cost2[:, 1].sum(), rtol=1e-5)
# NOTE(review): fragment of an inspection/debug script — `cost`, `emitter`,
# `generator`, `embed`, `context_mask`, `x`, `x_mask`, `context`, `x_tr` and
# `hidden_size_recurrent` are defined earlier, outside this chunk.
cg = ComputationGraph(cost)
model = Model(cost)

# Re-initialize every state-to-state matrix to a damped identity.
transition_matrix = VariableFilter(theano_name_regex="state_to_state")(
    cg.parameters)
for matr in transition_matrix:
    matr.set_value(0.98 * np.eye(hidden_size_recurrent, dtype=floatX))

readouts = VariableFilter(applications=[generator.readout.readout],
                          name_regex="output")(cg.variables)[0]

# Mixture-density readout components.
mean, sigma, corr, weight, penup = emitter.components(readouts)

emit = generator.generate(n_steps=400, iterate=True, attended=embed,
                          attended_mask=context_mask,
                          batch_size=embed.shape[1])[-4]

# Smoke-test the cost and emission functions on one training batch.
function([x, x_mask, context, context_mask], cost)(x_tr[0], x_tr[1],
                                                   x_tr[2], x_tr[3])

emit_fn = ComputationGraph(emit).get_theano_function()
emit_fn(x_tr[3], x_tr[2])[0].shape

# Per-component monitoring statistics of sigma and mu.
min_sigma = sigma.min(axis=(0, 2)).copy(name="sigma_min")
mean_sigma = sigma.mean(axis=(0, 2)).copy(name="sigma_mean")
max_sigma = sigma.max(axis=(0, 2)).copy(name="sigma_max")

min_mean = mean.min(axis=(0, 2)).copy(name="mu_min")
mean_mean = mean.mean(axis=(0, 2)).copy(name="mu_mean")
max_mean = mean.max(axis=(0, 2)).copy(name="mu_max")
def test_attention_transition():
    """Shape tests for AttentionTransition and a generator built on it."""
    inp_dim = 2
    inp_len = 10
    attended_dim = 3
    attended_len = 11
    batch_size = 4
    n_steps = 30

    transition = TestTransition(dim=inp_dim, attended_dim=attended_dim,
                                name="transition")
    attention = SequenceContentAttention(transition.apply.states,
                                         match_dim=inp_dim,
                                         name="attention")
    # The mixer combines the transition inputs with the glimpses.
    mixer = Mixer(
        [name for name in transition.apply.sequences if name != 'mask'],
        attention.take_look.outputs[0],
        name="mixer")
    att_trans = AttentionTransition(transition, attention, mixer,
                                    name="att_trans")
    att_trans.weights_init = IsotropicGaussian(0.01)
    att_trans.biases_init = Constant(0)
    att_trans.initialize()

    attended = tensor.tensor3("attended")
    attended_mask = tensor.matrix("attended_mask")
    inputs = tensor.tensor3("inputs")
    inputs_mask = tensor.matrix("inputs_mask")
    states, glimpses, weights = att_trans.apply(
        input_=inputs, mask=inputs_mask, attended=attended,
        attended_mask=attended_mask)
    assert states.ndim == 3
    assert glimpses.ndim == 3
    assert weights.ndim == 3

    input_vals = numpy.zeros((inp_len, batch_size, inp_dim),
                             dtype=floatX)
    input_mask_vals = numpy.ones((inp_len, batch_size),
                                 dtype=floatX)
    attended_vals = numpy.zeros((attended_len, batch_size, attended_dim),
                                dtype=floatX)
    attended_mask_vals = numpy.ones((attended_len, batch_size),
                                    dtype=floatX)

    func = theano.function([inputs, inputs_mask, attended, attended_mask],
                           [states, glimpses, weights])
    states_vals, glimpses_vals, weight_vals = func(
        input_vals, input_mask_vals, attended_vals, attended_mask_vals)

    assert states_vals.shape == input_vals.shape
    assert glimpses_vals.shape == (inp_len, batch_size, attended_dim)
    assert weight_vals.shape == (inp_len, batch_size, attended_len)

    # Test SequenceGenerator using AttentionTransition
    # NOTE(review): source_names=["state"] (singular) differs from the
    # "states" used by the other generators in this file — confirm this
    # matches the transition's state name.
    generator = SequenceGenerator(
        LinearReadout(readout_dim=inp_dim, source_names=["state"],
                      emitter=TestEmitter(name="emitter"),
                      name="readout"),
        transition=transition,
        attention=attention,
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
        name="generator")

    outputs = tensor.tensor3('outputs')
    costs = generator.cost(outputs, attended=attended,
                           attended_mask=attended_mask)
    costs_vals = costs.eval({
        outputs: input_vals,
        attended: attended_vals,
        attended_mask: attended_mask_vals
    })
    assert costs_vals.shape == (inp_len, batch_size)

    results = (generator.generate(n_steps=n_steps,
                                  batch_size=attended.shape[1],
                                  attended=attended,
                                  attended_mask=attended_mask))
    assert len(results) == 5
    states_vals, outputs_vals, glimpses_vals, weights_vals, costs_vals = (
        theano.function([attended, attended_mask],
                        results)(attended_vals, attended_mask_vals))
    assert states_vals.shape == (n_steps, batch_size, inp_dim)
    assert states_vals.shape == outputs_vals.shape
    assert glimpses_vals.shape == (n_steps, batch_size, attended_dim)
    assert weights_vals.shape == (n_steps, batch_size, attended_len)
    assert costs_vals.shape == (n_steps, batch_size)
# NOTE(review): fragment of an inspection/debug script (unconditional
# generation variant) — `cost`, `emitter`, `generator`, `x`, `x_mask`,
# `x_tr` and `hidden_size_recurrent` come from earlier, outside this chunk.
cg = ComputationGraph(cost)
model = Model(cost)

# Re-initialize every state-to-state matrix to a damped identity.
transition_matrix = VariableFilter(
    theano_name_regex="state_to_state")(cg.parameters)
for matr in transition_matrix:
    matr.set_value(0.98 * np.eye(hidden_size_recurrent, dtype=floatX))

readouts = VariableFilter(
    applications=[generator.readout.readout],
    name_regex="output")(cg.variables)[0]

# Mixture-density readout components.
mean, sigma, corr, weight, penup = emitter.components(readouts)

emit = generator.generate(
    n_steps=400,
    batch_size=8,
    iterate=True)[-2]
# ipdb.set_trace()

# Smoke-test the cost and emission functions.
function([x, x_mask], cost)(x_tr[0], x_tr[1])

emit_fn = ComputationGraph(emit).get_theano_function()
emit_fn()

# Per-component monitoring statistics of sigma and mu.
min_sigma = sigma.min(axis=(0, 2)).copy(name="sigma_min")
mean_sigma = sigma.mean(axis=(0, 2)).copy(name="sigma_mean")
max_sigma = sigma.max(axis=(0, 2)).copy(name="sigma_max")

min_mean = mean.min(axis=(0, 2)).copy(name="mu_min")
mean_mean = mean.mean(axis=(0, 2)).copy(name="mu_mean")
class NoLookupDecoder(Initializable):
    """This is the decoder implementation without embedding layer or
    softmax. The target sentence is represented as a sequence of
    vectors as defined by the sparse feature map.
    """

    def __init__(self, vocab_size, embedding_dim, state_dim, att_dim,
                 maxout_dim, representation_dim,
                 attention_strategy='content',
                 attention_sources='s',
                 readout_sources='sfa',
                 memory='none',
                 memory_size=500,
                 seq_len=50,
                 init_strategy='last', theano_seed=None, **kwargs):
        """Creates a new decoder brick without embedding.

        Args:
            vocab_size (int): Target language vocabulary size
            embedding_dim (int): Size of feedback embedding layer
            state_dim (int): Number of hidden units
            att_dim (int): Size of attention match vector
            maxout_dim (int): Size of maxout layer
            representation_dim (int): Dimension of source annotations
            attention_strategy (string): Which attention should be used
                                         cf. ``_initialize_attention``
            attention_sources (string): Defines the sources used by the
                                        attention model 's' for decoder
                                        states, 'f' for feedback
            readout_sources (string): Defines the sources used in the
                                      readout network. 's' for decoder
                                      states, 'f' for feedback, 'a' for
                                      attention (context vector)
            memory (string): Which external memory should be used
                             (cf. ``_initialize_attention``)
            memory_size (int): Size of the external memory structure
            seq_len (int): Maximum sentence length
            init_strategy (string): How to initialize the RNN state
                                    (cf. ``GRUInitialState``)
            theano_seed: Random seed
        """
        super(NoLookupDecoder, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.state_dim = state_dim
        self.representation_dim = representation_dim
        self.theano_seed = theano_seed

        # Initialize gru with special initial state
        self.transition = GRUInitialState(attended_dim=state_dim,
                                          init_strategy=init_strategy,
                                          dim=state_dim,
                                          activation=Tanh(),
                                          name='decoder')

        # Initialize the attention mechanism; non-positive att_dim falls
        # back to the state dimensionality.
        att_dim = att_dim if att_dim > 0 else state_dim
        self.attention, src_names = _initialize_attention(
            attention_strategy, seq_len, self.transition,
            representation_dim, att_dim, attention_sources,
            readout_sources, memory, memory_size)

        # Initialize the readout, note that NoLookupEmitter emits -1 for
        # initial outputs which is used by LookupFeedBackWMT15
        maxout_dim = maxout_dim if maxout_dim > 0 else state_dim
        readout = Readout(
            source_names=src_names,
            readout_dim=embedding_dim,
            emitter=NoLookupEmitter(initial_output=-1,
                                    readout_dim=embedding_dim,
                                    cost_brick=SquaredError()),
            #                       cost_brick=CategoricalCrossEntropy()),
            feedback_brick=TrivialFeedback(output_dim=embedding_dim),
            post_merge=InitializableFeedforwardSequence([
                Bias(dim=maxout_dim, name='maxout_bias').apply,
                Maxout(num_pieces=2, name='maxout').apply,
                # Floor division: the maxout halves the dimension, and the
                # layer size must stay an int under Python 3's true `/`.
                Linear(input_dim=maxout_dim // 2,
                       output_dim=embedding_dim,
                       use_bias=False, name='softmax0').apply,
                Logistic(name='softmax1').apply]),
            merged_dim=maxout_dim)

        # Build sequence generator accordingly
        self.sequence_generator = SequenceGenerator(
            readout=readout,
            transition=self.transition,
            attention=self.attention,
            fork=Fork([name for name in self.transition.apply.sequences
                       if name != 'mask'],
                      prototype=Linear()))

        self.children = [self.sequence_generator]

    @application(inputs=['representation', 'representation_mask',
                         'target_sentence_mask', 'target_sentence'],
                 outputs=['cost'])
    def cost(self, representation, representation_mask,
             target_sentence, target_sentence_mask):
        """Returns the masked, batch-averaged sequence cost of
        ``target_sentence`` given the source ``representation``.
        """
        # Time dimension first, as expected by the sequence generator.
        target_sentence = target_sentence.T
        target_sentence_mask = target_sentence_mask.T

        # Get the cost matrix
        cost = self.sequence_generator.cost_matrix(**{
            'mask': target_sentence_mask,
            'outputs': target_sentence,
            'attended': representation,
            'attended_mask': representation_mask})

        return (cost * target_sentence_mask).sum() / \
            target_sentence_mask.shape[1]

    @application
    def generate(self, source_shape, representation, **kwargs):
        """Generates a target sequence of up to twice the source length."""
        return self.sequence_generator.generate(
            n_steps=2 * source_shape[1],
            batch_size=source_shape[0],
            attended=representation,
            attended_mask=tensor.ones(source_shape).T,
            **kwargs)
extensions=[ FinishAfter(), TrainingDataMonitoring([cost], prefix="this_step", after_batch=True), TrainingDataMonitoring([cost], prefix="average", every_n_batches=100), Checkpoint(args.model, every_n_epochs=5), Printing(every_n_batches=100)]) main_loop.run() elif args.mode == "retrain": main_loop = load(open(args.model, "rb")) main_loop.run() elif args.mode == "sample": main_loop = load(open(args.model, "rb")) # get the one and only brick in the computation graph generator = main_loop.model.get_top_bricks()[0] sample = ComputationGraph(generator.generate( n_steps = args.sample_size, batch_size = 1, iterate = True )).get_theano_function() states, outputs, costs = [data[:, 0] for data in sample()] print "".join(corpus.decode(outputs)) else: assert False