# Strip a trailing '.json' extension from the model name.
# BUG FIX: the original used `model_name[:model_name.find('.json')]`
# unconditionally — when '.json' is absent, str.find returns -1 and the
# slice silently drops the last character of the name.
_ext_idx = model_name.find('.json')
if _ext_idx != -1:
    model_name = model_name[:_ext_idx]

# In[5]:

print('Building model graph...')
tf.reset_default_graph()

# Batch size defaults to 1 (single-recording inference) but can be fed
# a different value at session run time.
batch_size = tf.placeholder_with_default(1, [], name='batch_size')

# Alternative TFRecord input pipeline, kept for reference:
# input_op, seq_len, label = data.ops.get_batch_producer(
#     batch_size=batch_size, path='./data/train.TFRecord')

# Raw signal placeholder: one recording of variable length.
# NOTE(review): seq_len is a float32 placeholder — presumably the
# downstream models expect that dtype; confirm before changing.
input_op = tf.placeholder(tf.float32, [1, None])
seq_len = tf.placeholder(tf.float32, [1])

# Feature extraction: a CNN front-end feeds an RNN, while Fourier and
# time-domain features are computed directly from the raw input.
c = cnn.model(seq_len=seq_len, input_op=input_op, **cnn_params)
r = rnn.get_model(batch_size=batch_size, seq_len=seq_len,
                  input_op=c.output, **rnn_params)
f = fourier.get_output(seq_len=seq_len, input_op=input_op,
                       **fourier_params)
td = time_domain.get_output(seq_len=seq_len, input_op=input_op,
                            **time_domain_params)

# Concatenate the RNN's last output with the hand-crafted features and
# classify with a fully-connected head.
concatenated_features = tf.concat([r.last_output, f, td], 1)
fc = classifier.model(input_op=concatenated_features, **fc_params)
logits = fc.logits
pred = fc.pred
print('Building model... done!')

# Load recording
print('Loading record...', end=' ')
# dir = "./validation/"
# --- Text-generation driver: preprocess, train the RNN, sample new text ---

# Training / generation hyperparameters.
batch = 128            # mini-batch size
optimizer = 'rmsprop'  # Keras-style optimizer name
seq = 5                # length of each input sequence
new_words = 1000       # number of tokens to generate
temperature = 0.5      # sampling temperature

# Preprocess the raw text into training sequences.
# NOTE(review): renamed from `file` — it shadowed the builtin name.
corpus = inp(text, seq)
corpus.text_seq()
x, y = corpus.rnn_input()

# BUG FIX: the original wrote `rnn = rnn(...)`, rebinding the `rnn`
# class name to its own instance; the model now gets a distinct name so
# the class stays callable and the code is unambiguous.
model = rnn(text, x, y, layer1=layer, dropout=drop, epochs=epochs,
            batch=batch, optimizer=optimizer)
model.define()
# model.load()  # alternative to training: restore previously saved weights
model.train()

# Generate new text from the trained model and persist it.
new = output(corpus.get_content(), seq=seq, words=new_words,
             temp=temperature)
vocab, dict1, dict2 = corpus.get_vocab()
new_text = new.generate(model.get_model(), vocab, dict1, dict2)
# print(new_text)
corpus.save(new_text)