def get_train_data_stats(options):
    """Run a short pass over the training split and collect feature statistics.

    Computes per-batch mean/variance of the encoder inputs and of the target
    labels (moments taken over axes [0, 1]) for a fixed number of batches,
    plus the maximum over batches of the mean encoder-input length.

    Args:
        options: dict of pipeline options forwarded to ``get_split3``;
            ``options['batch_size']`` is referenced only by the currently
            disabled full-epoch step computation below.

    Returns:
        Tuple ``(eim, eivar, tlm, tlvar, eilm)``:
            eim, eivar: per-batch means/variances of encoder inputs,
                stacked along axis 0 (one row per batch).
            tlm, tlvar: per-batch means/variances of target labels,
                stacked along axis 0.
            eilm: max over batches of the mean encoder-input length.
    """
    encoder_inputs, target_labels, num_examples, words, decoder_inputs, \
        target_labels_lengths, encoder_inputs_lengths, \
        decoder_inputs_lengths = get_split3(options)
    # NOTE(review): full-epoch step count is disabled; statistics are
    # estimated from just 5 batches. Re-enable the commented expression
    # to cover the whole split.
    number_of_steps_per_epoch = 5  # num_examples // options['batch_size'] + 1
    sess = start_interactive_session()

    # Build the statistics ops ONCE, outside the loop. The original code
    # constructed tf.nn.moments/tf.reduce_mean inside the loop, which adds
    # new nodes to the TF graph on every iteration — leaking graph memory
    # and making each step progressively slower. The fetched values are
    # identical either way.
    stats_ops = [
        tf.nn.moments(encoder_inputs, [0, 1]),
        tf.nn.moments(target_labels, [0, 1]),
        tf.reduce_mean(encoder_inputs_lengths),
    ]

    eim, eivar, tlm, tlvar, eilm = [], [], [], [], []
    for i in range(number_of_steps_per_epoch):
        print("step %d of %d" % (i + 1, number_of_steps_per_epoch))
        ei, tl, eil = sess.run(stats_ops)
        eim.append(ei[0])    # batch mean of encoder inputs
        eivar.append(ei[1])  # batch variance of encoder inputs
        tlm.append(tl[0])    # batch mean of target labels
        tlvar.append(tl[1])  # batch variance of target labels
        eilm.append(eil)     # mean encoder-input length for this batch

    # Keep per-batch statistics stacked along axis 0; averaging over
    # batches was deliberately left disabled in the original.
    eim = np.stack(eim, axis=0)      # .mean(axis=0)
    eivar = np.stack(eivar, axis=0)  # .mean(axis=0)
    tlm = np.stack(tlm, axis=0)      # .mean(axis=0)
    tlvar = np.stack(tlvar, axis=0)  # .mean(axis=0)
    eilm = np.max(eilm)
    return eim, eivar, tlm, tlvar, eilm
# Tail of the options dict — the literal opens (options = {...) before this
# chunk; only the last few keys and the closing brace are visible here.
'save_graph': False,
# Directory where TensorBoard summaries are written.
'save_dir': "/data/mat10/Projects/audio23d/Models/rnn_plus/summaries",
'save_summaries': True
}

# Commented-out alternative data provider (kept for reference).
#from data_provider import get_split
#raw_audio, mfcc, target_labels, \
#num_examples, word, decoder_inputs, \
#label_lengths, mfcc_lengths, decoder_inputs_lengths = get_split(options)
#raw_audio, mfcc, label, num_examples, word = get_split()

# Main path (always taken): build the model, optionally save the graph and
# restore a checkpoint, then either train or evaluate depending on options.
if True:
    model = RNNplusModel(options)
    sess = start_interactive_session()
    if options['save_graph']:
        model.save_graph(sess)
    if options['restore']:
        model.restore_model(sess)
    if options['is_training']:
        model.train(sess)
    else:
        # return_words=False: evaluation yields only the loss value.
        loss = model.eval(sess, return_words=False)

# Disabled sweep: evaluate checkpoints from epochs 1..53 in turn.
# NOTE(review): the loop body continues past the end of this chunk —
# presumably it evaluates each restored model and fills `losses`; verify
# against the rest of the file before editing.
if False:
    losses = {}
    for ep in range(1, 54):
        options['restore_model'] = "/data/mat10/Projects/audio23d/Models/rnn_plus/rnnplus_all_melf_era1_epoch%d_step302" % ep
        model = RNNplusModel(options)
        sess = start_interactive_session()