def fetch_data():
    """Fetch features, labels and sequence lengths from the shared input queue.

    Returns:
        Tuple of (feats, labels, seq_lens) tensors produced by
        deepSpeech.inputs for the 'train' split.

    NOTE(review): this definition is shadowed by a later ``fetch_data``
    in the same module; it is effectively dead code — confirm and remove.
    """
    batch = ARGS.batch_size
    feats, labels, seq_lens = deepSpeech.inputs(
        eval_data='train',
        data_dir=ARGS.data_dir,
        batch_size=batch,
        use_fp16=ARGS.use_fp16,
        shuffle=ARGS.shuffle)
    # No per-tower splitting here; hand the full batch straight back.
    return feats, labels, seq_lens
def fetch_data():
    """Fetch features, labels and sequence lengths from a common queue.

    The global batch is split along axis 0 into one slice per tower
    (GPU), so each tower trains on its own sub-batch.

    Returns:
        Tuple (split_feats, split_labels, split_seq_lens) where each
        element is a list of per-tower tensors (split_labels holds
        SparseTensor slices from tf.sparse_split).
    """
    # Generalized: the tower count was hard-coded to 1 in four separate
    # places; derive it once from ARGS instead. getattr default of 1
    # preserves the original single-tower behavior when ARGS has no
    # num_gpus attribute.
    num_towers = getattr(ARGS, 'num_gpus', 1)
    tot_batch_size = ARGS.batch_size * num_towers
    feats, labels, seq_lens = deepSpeech.inputs(
        eval_data='train',
        data_dir=ARGS.data_dir,
        batch_size=tot_batch_size,
        use_fp16=ARGS.use_fp16,
        shuffle=ARGS.shuffle)

    # Split features, labels and sequence lengths for each tower along
    # the batch (0th) axis. Labels are sparse, so they need sparse_split.
    split_feats = tf.split(feats, num_towers, 0)
    split_labels = tf.sparse_split(sp_input=labels, num_split=num_towers, axis=0)
    split_seq_lens = tf.split(seq_lens, num_towers, 0)
    return split_feats, split_labels, split_seq_lens
def evaluate():
    """Evaluate the deepSpeech model, repeatedly or once per ARGS.run_once."""
    with tf.Graph().as_default() as graph:
        # Input pipeline for the evaluation split.
        feats, labels, seq_lens = deepSpeech.inputs(
            ARGS.eval_data,
            data_dir=ARGS.data_dir,
            batch_size=ARGS.batch_size,
            use_fp16=ARGS.use_fp16,
            shuffle=True)

        sess = tf.Session()

        # Inference graph; dropout must be disabled while evaluating.
        ARGS.keep_prob = 1.0
        logits = deepSpeech.inference(sess, feats, seq_lens, ARGS)

        # Greedy CTC decoding over log-probabilities. Sequence lengths
        # must be adjusted for the striding applied before the RNN.
        log_probs = tf.nn.log_softmax(logits)
        rnn_seq_lens = deepSpeech.get_rnn_seqlen(seq_lens)
        predictions = tf.nn.ctc_greedy_decoder(log_probs, rnn_seq_lens)

        # Restore the exponential-moving-average shadow variables rather
        # than the raw training weights.
        ema = tf.train.ExponentialMovingAverage(ARGS.moving_avg_decay)
        saver = tf.train.Saver(ema.variables_to_restore())

        # Summaries for TensorBoard.
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(ARGS.eval_dir, graph)

        # Evaluation loop: run once, or poll every eval_interval_secs.
        while True:
            eval_once(sess, saver, summary_writer, predictions,
                      summary_op, labels)
            if ARGS.run_once:
                break
            time.sleep(ARGS.eval_interval_secs)