Example 1
def fetch_data():
    """ Fetch features, labels and sequence_lengths from a common queue."""
    tot_batch_size = ARGS.batch_size
    with tf.device('/cpu'):
        feats, labels, seq_lens = deepSpeech.inputs(eval_data='train',
                                                    data_dir=ARGS.data_dir,
                                                    batch_size=tot_batch_size,
                                                    use_fp16=ARGS.use_fp16,
                                                    shuffle=ARGS.shuffle)
        # tf.Print is an identity op with a logging side effect; keep its
        # returned tensor, since the print only fires if that output is used.
        dense_labels = tf.sparse_tensor_to_dense(labels)
        dense_labels = tf.Print(dense_labels, [dense_labels], "labels")

    # Single-device variant: return the full batch without splitting it per tower
    return feats, labels, seq_lens
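
The input pipeline above is queue-based, so nothing flows out of fetch_data() until queue runners have been started. A minimal consumption sketch, assuming a TF 1.x session and reusing fetch_data() and ARGS from the example above (run_fetch_demo is only an illustrative name):

def run_fetch_demo(num_batches=3):
    """ Pull a few batches from the queue to sanity-check fetch_data()."""
    feats, labels, seq_lens = fetch_data()
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        # Queue runners must be running before any fetch, otherwise
        # sess.run() blocks on the empty input queue.
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for _ in range(num_batches):
                batch_feats, batch_seq_lens = sess.run([feats, seq_lens])
                print('feats:', batch_feats.shape,
                      'seq_lens:', batch_seq_lens.shape)
        finally:
            coord.request_stop()
            coord.join(threads)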
Example 2
def fetch_data():
    """ Fetch features, labels and sequence_lengths from a common queue."""

    tot_batch_size = ARGS.batch_size * ARGS.num_gpus
    feats, labels, seq_lens = deepSpeech.inputs(eval_data='train',
                                                data_dir=ARGS.data_dir,
                                                batch_size=tot_batch_size,
                                                use_fp16=ARGS.use_fp16,
                                                shuffle=ARGS.shuffle)

    # Split features, labels and sequence lengths into one slice per tower
    # (TF 1.x argument order; tf.sparse_split requires keyword arguments).
    split_feats = tf.split(feats, ARGS.num_gpus, axis=0)
    split_labels = tf.sparse_split(sp_input=labels, num_split=ARGS.num_gpus,
                                   axis=0)
    split_seq_lens = tf.split(seq_lens, ARGS.num_gpus, axis=0)

    return split_feats, split_labels, split_seq_lens
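
The per-tower slices returned here are meant to feed one model replica per GPU. A hedged sketch of that consumption, where tower_loss is a hypothetical helper standing in for however the project builds its per-tower graph and CTC loss:

def build_towers(split_feats, split_labels, split_seq_lens):
    """ Build one model replica per GPU, each fed its own slice of the batch."""
    tower_losses = []
    for gpu in range(ARGS.num_gpus):
        with tf.device('/gpu:%d' % gpu), tf.name_scope('tower_%d' % gpu):
            # tower_loss (hypothetical) builds the model on this device and
            # returns the CTC loss for this slice of the batch.
            loss = tower_loss(split_feats[gpu], split_labels[gpu],
                              split_seq_lens[gpu])
            tower_losses.append(loss)
            # Share weights across towers by reusing variables after tower 0.
            tf.get_variable_scope().reuse_variables()
    return tf.reduce_mean(tower_losses)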
Example 3
def evaluate():
    """ Evaluate deepSpeech modelfor a number of steps."""
    if ARGS.lm_model != None:
        print(ARGS.lm_model)
        lm = LM(ARGS.lm_model)
    else:
        lm = None

    with tf.Graph().as_default() as graph:

        # Get feats and labels for deepSpeech.
        feats, labels, seq_lens = deepSpeech.inputs(ARGS.eval_data,
                                                    data_dir=ARGS.data_dir,
                                                    batch_size=ARGS.batch_size,
                                                    use_fp16=ARGS.use_fp16,
                                                    shuffle=True)

        # Build the op that computes logits predictions from the
        # inference model.
        ARGS.keep_prob = 1.0  # Disable dropout during testing.
        logits = deepSpeech.inference(feats, seq_lens, ARGS)

        # Calculate predictions.
        output_log_prob = tf.nn.softmax(logits)
        decoder = tf.nn.ctc_greedy_decoder
        strided_seq_lens = tf.div(seq_lens, ARGS.temporal_stride)
        predictions = decoder(output_log_prob, strided_seq_lens)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            ARGS.moving_avg_decay)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(ARGS.eval_dir, graph)

        while True:
            eval_once(saver, summary_writer, predictions, summary_op, labels,
                      output_log_prob, lm)

            if ARGS.run_once:
                break
            time.sleep(ARGS.eval_interval_secs)
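
eval_once itself is not shown above. A minimal sketch of what such a helper typically does in a TF 1.x evaluation loop: restore the newest checkpoint, start queue runners, run the decoder once, and write a summary. ARGS.checkpoint_dir is an assumed flag name, and turning the decoded SparseTensor back into text plus any rescoring with lm is project-specific and only indicated by comments:

def eval_once(saver, summary_writer, predictions, summary_op, labels,
              output_log_prob, lm):
    """ Restore the latest checkpoint and run one pass of the decoder."""
    with tf.Session() as sess:
        # ARGS.checkpoint_dir is assumed here; the real flag name may differ.
        ckpt = tf.train.get_checkpoint_state(ARGS.checkpoint_dir)
        if ckpt is None or ckpt.model_checkpoint_path is None:
            print('No checkpoint found in', ARGS.checkpoint_dir)
            return
        saver.restore(sess, ckpt.model_checkpoint_path)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            # predictions[0][0] is the decoded SparseTensor from the greedy
            # CTC decoder; labels is the ground-truth SparseTensor.
            decoded, truth = sess.run([predictions[0][0], labels])
            # Project-specific: map decoded.values (character ids) to strings,
            # optionally rescore the hypotheses with lm, and compute error rates.
            summary_writer.add_summary(sess.run(summary_op))
        finally:
            coord.request_stop()
            coord.join(threads)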