Example #1
def main(_):
    initialize_globals()
    input_dir = './data/CommonVoice/pertubed_sets'

    output_dir = './results/randomly-initialized'
    tfv1.reset_default_graph()
    activations_common_voice_pertubed_sets(input_dir=input_dir,
                                           output_dir=output_dir,
                                           test_only=True,
                                           randomly_initialized=True)

    output_dir = './results'

    # Obtain activations for all sets without pruning of common voice test set
    tfv1.reset_default_graph()
    activations_common_voice_pertubed_sets(input_dir=input_dir,
                                           output_dir=output_dir)

    # Obtain activations for non-training sets with pruning of common voice test set
    tfv1.reset_default_graph()
    activations_common_voice_pertubed_sets(
        input_dir=input_dir,
        output_dir=output_dir,
        test_only=True,
        prune_percentage=.1,
        scores_file='./results/activations_combined.npy')

    # Obtain activations for non-training sets with random pruning of common voice test set
    tfv1.reset_default_graph()
    activations_common_voice_pertubed_sets(
        input_dir=input_dir,
        output_dir=output_dir,
        test_only=True,
        random=True,
        prune_percentage=.1,
        scores_file='./results/activations_combined.npy')

    # Obtain activations for all sets without pruning of librispeech validation set
    tfv1.reset_default_graph()
    activations_libri_speech_test_set(input_dir=input_dir,
                                      output_dir=output_dir)

    # Obtain activations for non-training sets with pruning of librispeech validation set
    tfv1.reset_default_graph()
    activations_libri_speech_test_set(
        input_dir=input_dir,
        output_dir=output_dir,
        test_only=True,
        prune_percentage=.1,
        scores_file='./results/activations_combined.npy')

    # Obtain activations for non-training sets with random pruning of librispeech validation set
    tfv1.reset_default_graph()
    activations_libri_speech_test_set(
        input_dir=input_dir,
        output_dir=output_dir,
        test_only=True,
        random=True,
        prune_percentage=.1,
        scores_file='./results/activations_combined.npy')
Example #2
def main(_):
    initialize_globals()

    if FLAGS.train or FLAGS.test:
        if len(FLAGS.worker_hosts) == 0:
            # Only one local task: this process (default case - no cluster)
            with tf.Graph().as_default():
                train()
            if Config.is_chief:
                export()
            # Now do a final test epoch
            if FLAGS.test:
                with tf.Graph().as_default():
                    test()
            log_debug('Done.')
        else:
            # Create and start a server for the local task.
            server = tf.train.Server(Config.cluster,
                                     job_name=FLAGS.job_name,
                                     task_index=FLAGS.task_index)
            if FLAGS.job_name == 'ps':
                # We are a parameter server and therefore we just wait for all workers to finish
                # by waiting for their stop tokens.
                with tf.Session(server.target) as session:
                    for worker in FLAGS.worker_hosts:
                        log_debug('Waiting for stop token...')
                        token = session.run(
                            Config.done_dequeues[FLAGS.task_index])
                        if token < 0:
                            log_debug(
                                'Got a kill switch token from worker %i.' %
                                abs(token + 1))
                            break
                        log_debug('Got a stop token from worker %i.' % token)
                log_debug('Session closed.')

                if FLAGS.test:
                    test()
            elif FLAGS.job_name == 'worker':
                # We are a worker and therefore we have to do some work.
                # Assigns ops to the local worker by default.
                with tf.device(
                        tf.train.replica_device_setter(
                            worker_device=Config.worker_device,
                            cluster=Config.cluster)):

                    # Do the training
                    train(server)

            log_debug('Server stopped.')

    # Are we the main process?
    if Config.is_chief:
        # Doing solo/post-processing work just on the main process...
        # Exporting the model
        if FLAGS.export_dir:
            export()

    if len(FLAGS.one_shot_infer):
        do_single_file_inference(FLAGS.one_shot_infer)
Example #3
def main(_):
    initialize_globals()

    if FLAGS.train_files:
        tfv1.reset_default_graph()
        tfv1.set_random_seed(FLAGS.random_seed)
        train()

    if FLAGS.test_files:
        tfv1.reset_default_graph()
        test()

    if FLAGS.export_dir and not FLAGS.export_zip:
        tfv1.reset_default_graph()
        export()

    if FLAGS.export_zip:
        tfv1.reset_default_graph()
        FLAGS.export_tflite = True

        if os.listdir(FLAGS.export_dir):
            log_error('Directory {} is not empty, please fix this.'.format(FLAGS.export_dir))
            sys.exit(1)

        export()
        package_zip()

    if FLAGS.one_shot_infer:
        tfv1.reset_default_graph()
        do_single_file_inference(FLAGS.one_shot_infer)
Example #4
def total_inference(input_folder, output_folder, checkpoint_dir, layer_wanted,
                    softmax_wanted, win_size_s, stride_size_s, fea_format,
                    csv_format):
    initialize_globals()

    for root, dirs, files in os.walk(input_folder):
        nb = len(files)
        it = 0
        # Iterate over files
        for filename in files:
            print(filename)
            if ((it + 1) % 100) == 0:
                print(it, 'on', nb)
            it += 1
            if not filename.endswith('.wav'):
                continue
            else:
                full_name = os.path.join(root.lstrip('./'), filename)
                tf.reset_default_graph()
                do_single_file_inference(checkpoint_dir=checkpoint_dir,
                                         input_file_path='/' + full_name,
                                         layer_wanted=layer_wanted,
                                         win_size_s=win_size_s,
                                         stride_size_s=stride_size_s,
                                         save_folder=output_folder,
                                         save_filename=filename[:-4],
                                         softmax_wanted=softmax_wanted,
                                         fea_format=fea_format,
                                         csv_format=csv_format)
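A hedged usage sketch for `total_inference`; the paths and parameter values below are placeholders chosen only for illustration (window and stride in seconds, as the `_s` suffixes suggest), not values taken from the source:

total_inference(input_folder='./wavs',
                output_folder='./features',
                checkpoint_dir='./checkpoints',
                layer_wanted='layer_5',      # placeholder layer identifier
                softmax_wanted=False,
                win_size_s=0.5,
                stride_size_s=0.25,
                fea_format=True,
                csv_format=False)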
Example #5
def main(_):
    initialize_globals()

    if not FLAGS.test_files:
        log_error('You need to specify what files to use for evaluation via '
                  'the --test_files flag.')
        exit(1)

    global alphabet
    alphabet = Alphabet(FLAGS.alphabet_config_path)

    # sort examples by length, improves packing of batches and timesteps
    test_data = preprocess(
        FLAGS.test_files.split(','),
        FLAGS.test_batch_size,
        alphabet=alphabet,
        numcep=Config.n_input,
        numcontext=Config.n_context,
        hdf5_cache_path=FLAGS.hdf5_test_set).sort_values(
        by="features_len",
        ascending=False)

    from DeepSpeech import create_inference_graph
    graph = create_inference_graph(batch_size=FLAGS.test_batch_size, n_steps=-1)

    samples = evaluate(test_data, graph, alphabet)

    if FLAGS.test_output_file:
        # Save decoded tuples as JSON, converting NumPy floats to Python floats
        json.dump(samples, open(FLAGS.test_output_file, 'w'), default=lambda x: float(x))
Example #6
def transcribe_file(audio_path, tlog_path):
    from DeepSpeech import create_model  # pylint: disable=cyclic-import,import-outside-toplevel
    from util.checkpoints import load_or_init_graph
    initialize_globals()
    scorer = Scorer(FLAGS.lm_alpha, FLAGS.lm_beta, FLAGS.scorer_path,
                    Config.alphabet)
    try:
        num_processes = cpu_count()
    except NotImplementedError:
        num_processes = 1
    with AudioFile(audio_path, as_path=True) as wav_path:
        data_set = split_audio_file(
            wav_path,
            batch_size=FLAGS.batch_size,
            aggressiveness=FLAGS.vad_aggressiveness,
            outlier_duration_ms=FLAGS.outlier_duration_ms,
            outlier_batch_size=FLAGS.outlier_batch_size)
        iterator = tf.data.Iterator.from_structure(
            data_set.output_types,
            data_set.output_shapes,
            output_classes=data_set.output_classes)
        batch_time_start, batch_time_end, batch_x, batch_x_len = iterator.get_next(
        )
        no_dropout = [None] * 6
        logits, _ = create_model(batch_x=batch_x,
                                 seq_length=batch_x_len,
                                 dropout=no_dropout)
        transposed = tf.nn.softmax(tf.transpose(logits, [1, 0, 2]))
        tf.train.get_or_create_global_step()
        with tf.Session(config=Config.session_config) as session:
            if FLAGS.load == 'auto':
                method_order = ['best', 'last']
            else:
                method_order = [FLAGS.load]
            load_or_init_graph(session, method_order)
            session.run(iterator.make_initializer(data_set))
            transcripts = []
            while True:
                try:
                    starts, ends, batch_logits, batch_lengths = \
                        session.run([batch_time_start, batch_time_end, transposed, batch_x_len])
                except tf.errors.OutOfRangeError:
                    break
                decoded = ctc_beam_search_decoder_batch(
                    batch_logits,
                    batch_lengths,
                    Config.alphabet,
                    FLAGS.beam_width,
                    num_processes=num_processes,
                    scorer=scorer)
                decoded = list(d[0][1] for d in decoded)
                transcripts.extend(zip(starts, ends, decoded))
            transcripts.sort(key=lambda t: t[0])
            transcripts = [{
                'start': int(start),
                'end': int(end),
                'transcript': transcript
            } for start, end, transcript in transcripts]
            with open(tlog_path, 'w') as tlog_file:
                json.dump(transcripts, tlog_file, default=float)
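`transcribe_file` relies on globally parsed flags, so it needs a small driver around it. A minimal sketch, assuming the usual DeepSpeech flag setup (`create_flags` from `util.flags`, absl flag parsing) and placeholder file names:

from absl import app
from util.flags import create_flags  # assumed module layout

def main(_):
    # Transcribe one recording and write its timed transcript as JSON.
    transcribe_file('recording.wav', 'recording.tlog')

if __name__ == '__main__':
    create_flags()
    app.run(main)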
Example #7
def main(_):
    initialize_globals()
    if FLAGS.cmd != 'classify' and FLAGS.cmd != 'attack':
        print('Unsupported command, please check your cmd parameter')
    else:
        adv_ds = AdvDeepSpeech(FLAGS.model_path, FLAGS.input)
        if FLAGS.cmd == 'classify':
            adv_ds.classify2(FLAGS.input)
        else:
            adv_ds.attack(FLAGS.iteration)
Example #8
def main(_):
    initialize_globals()

    # Evaluate on:
    # - files used for pruning (per set prune or total prune)
    # - original test set for model

    pertubed_sets = json.load(open('data/pertubed_input_sets_balanced.json'))
    train_sets = json.load(open('./results/set_ids_used.json'))
    common_voice_info = get_file_info('./data/common-voice-pertubed_sets.csv')

    file_info = []
    for set in pertubed_sets:
        if str(set['set_id']) in train_sets: continue
        for item in set['set_items']:
            filename = item['path'][:-4]
            file_info.append(common_voice_info[filename])

    print('{} test files found...'.format(len(file_info)))

    # Clean up characters in case they are in the transcript
    not_allowed = [',', '.', '!', '?', '"', '-', ':', ';']
    for info in file_info:
        if any(c in info['transcript'] for c in not_allowed):
            for c in not_allowed:
                info['transcript'] = info['transcript'].replace(c, '')

    # Prune on all pertubed sets combined and evaluate on test set
    percents = [0, .05, .1, .2]
    print('Evaluating pruning on common voice test set')
    evaluation = {}
    for percent in percents:
        results = evaluate(scores_path='./results/activations_combined.npy',
                           prune_percent=percent,
                           evaluate_files=file_info)
        evaluation['{}'.format(percent)] = results
        print(results)
    json.dump(evaluation,
              open('./results/evaluations_all_pertubated_sets.json', 'w+'))

    # Prune and evaluate on original test set
    percents = [0, .1]
    print('Evaluating pruning on original test set')
    file_info = get_file_info('./data/librivox-test-clean.csv')
    evaluation = {}
    for percent in percents:
        results = evaluate(scores_path='./results/activations_combined.npy',
                           prune_percent=percent,
                           evaluate_files=file_info.values())

        evaluation['{}'.format(percent)] = results
        print(results)

    json.dump(evaluation,
              open('./results/evaluations_original_test_set.json', 'w+'))
Example #9
def main(_):
    initialize_globals()

    if FLAGS.train or FLAGS.test:
        if len(FLAGS.worker_hosts) == 0:
            # Only one local task: this process (default case - no cluster)
            with tf.Graph().as_default():
                tf.set_random_seed(FLAGS.random_seed)
                train()
            # Now do a final test epoch
            if FLAGS.test:
                with tf.Graph().as_default():
                    test()
            log_debug('Done.')
        else:
            # Create and start a server for the local task.
            server = tf.train.Server(Config.cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)
            if FLAGS.job_name == 'ps':
                # We are a parameter server and therefore we just wait for all workers to finish
                # by waiting for their stop tokens.
                with tf.Session(server.target) as session:
                    for worker in FLAGS.worker_hosts:
                        log_debug('Waiting for stop token...')
                        token = session.run(Config.done_dequeues[FLAGS.task_index])
                        if token < 0:
                            log_debug('Got a kill switch token from worker %i.' % abs(token + 1))
                            break
                        log_debug('Got a stop token from worker %i.' % token)
                log_debug('Session closed.')

                if FLAGS.test:
                    test()
            elif FLAGS.job_name == 'worker':
                # We are a worker and therefore we have to do some work.
                # Assigns ops to the local worker by default.
                with tf.device(tf.train.replica_device_setter(
                               worker_device=Config.worker_device,
                               cluster=Config.cluster)):

                    # Do the training
                    train(server)

            log_debug('Server stopped.')

    # Are we the main process?
    if Config.is_chief:
        # Doing solo/post-processing work just on the main process...
        # Exporting the model
        if FLAGS.export_dir:
            export()

    if len(FLAGS.one_shot_infer):
        do_single_file_inference(FLAGS.one_shot_infer)
Example #10
def main(_):
    initialize_globals()

    if not FLAGS.test_files:
        log_error('You need to specify what files to use for evaluation via '
                  'the --test_files flag.')
        exit(1)

    from DeepSpeech import create_model, try_loading # pylint: disable=cyclic-import
    samples = evaluate(FLAGS.test_files.split(','), create_model, try_loading)

    if FLAGS.test_output_file:
        # Save decoded tuples as JSON, converting NumPy floats to Python floats
        json.dump(samples, open(FLAGS.test_output_file, 'w'), default=float)
Example #12
def main(_):
    initialize_globals()

    evaluation_csv = './data/librivox-test-clean.csv'
    results_file = './results/evaluation_output.txt'
    scores_file = './results/final_imp_scores.npy'

    for prune_settings in [(0, False), (.05, False), (.05, True), (.1, False),
                           (.1, True), (.15, False), (.15, True)]:
        evaluate_with_pruning(evaluation_csv,
                              create_model,
                              try_loading,
                              prune_settings[0],
                              random=prune_settings[1],
                              scores_file=scores_file,
                              result_file=results_file)
Example #13
def main(_):
    initialize_globals()

    if not FLAGS.test_files:
        log_error('You need to specify what files to use for evaluation via '
                  'the --test_files flag.')
        sys.exit(1)

    is_character_based = character_based()

    study = optuna.create_study()
    study.set_user_attr("is_character_based", is_character_based)
    study.optimize(objective, n_jobs=1, n_trials=FLAGS.n_trials)
    print('Best params: lm_alpha={} and lm_beta={} with WER={}'.format(
        study.best_params['lm_alpha'], study.best_params['lm_beta'],
        study.best_value))
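The `objective` callable optimized by the study is not part of this snippet. A minimal sketch of what such an objective might look like, assuming flag bounds named `lm_alpha_max`/`lm_beta_max` and a hypothetical `score_trial` helper that decodes `--test_files` with the candidate weights and returns the word error rate:

def objective(trial):
    # Sample candidate scorer weights inside flag-defined bounds
    # (the flag names here are assumptions, not taken from the snippet above).
    lm_alpha = trial.suggest_uniform('lm_alpha', 0, FLAGS.lm_alpha_max)
    lm_beta = trial.suggest_uniform('lm_beta', 0, FLAGS.lm_beta_max)
    # Decode the test files with these weights; Optuna minimizes the returned WER.
    return score_trial(FLAGS.test_files.split(','), lm_alpha, lm_beta)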
Example #14
def main(_):
    initialize_globals()

    if FLAGS.train:
        with tf.Graph().as_default():
            tf.set_random_seed(FLAGS.random_seed)
            train()

    if FLAGS.test:
        with tf.Graph().as_default():
            test()

    if FLAGS.export_dir:
        export()

    if len(FLAGS.one_shot_infer):
        do_single_file_inference(FLAGS.one_shot_infer)
Example #15
def main(_):
    initialize_globals()

    if FLAGS.train_files:
        tfv1.reset_default_graph()
        tfv1.set_random_seed(FLAGS.random_seed)
        train()

    if FLAGS.test_files:
        tfv1.reset_default_graph()
        test()

    if FLAGS.export_dir:
        tfv1.reset_default_graph()
        export()

    if FLAGS.one_shot_infer:
        tfv1.reset_default_graph()
        do_single_file_inference(FLAGS.one_shot_infer)
Example #16
def main(_):
    initialize_globals()

    if FLAGS.train or FLAGS.test:
        # Only one local task: this process (default case - no cluster)
        with tf.Graph().as_default():
            tf.set_random_seed(FLAGS.random_seed)
            train()
        # Now do a final test epoch
        if FLAGS.test:
            with tf.Graph().as_default():
                test()
        log_debug('Done.')

    # Are we the main process?
    if Config.is_chief:
        # Doing solo/post-processing work just on the main process...
        # Exporting the model
        if FLAGS.export_dir:
            export()
Example #17
def main(_):
    initialize_globals()

    if not FLAGS.test_files:
        log_error('You need to specify what files to use for evaluation via '
                  'the --test_files flag.')
        exit(1)
    #if FLAGS.embeddings_output_dir:
    #    prefix = FLAGS.embeddings_output_dir
    #    print('Prefix :', prefix)
    #    #print('LAYER4 :', LAYER4)
    #    EMBEDDINGS = prefix + 'embeddings/'
    #    LAYER4 = EMBEDDINGS + 'layer4/'
    #    LAYER5 = EMBEDDINGS + 'layer5/'
    #    LAYER6 = EMBEDDINGS + 'layer6/'
    #    c.TEXT = EMBEDDINGS + 'text/'
    #    print('LAYER4 :', LAYER4)
    # sort examples by length, improves packing of batches and timesteps
    test_data = preprocess(FLAGS.test_files.split(','),
                           FLAGS.test_batch_size,
                           alphabet=Config.alphabet,
                           numcep=Config.n_input,
                           numcontext=Config.n_context,
                           hdf5_cache_path=FLAGS.hdf5_test_set).sort_values(
                               by="features_len", ascending=False)
    #print('test_data', test_data)
    #print(test_data.fname[1])
    #return 1
    #print(test_data[0].fname)
    print('Batch Size: ', FLAGS.test_batch_size)
    from DeepSpeech import create_inference_graph
    graph = create_inference_graph(batch_size=FLAGS.test_batch_size,
                                   n_steps=-1)

    samples = evaluate(test_data, graph)

    if FLAGS.test_output_file:
        # Save decoded tuples as JSON, converting NumPy floats to Python floats
        json.dump(samples,
                  open(FLAGS.test_output_file, 'w'),
                  default=lambda x: float(x))
Example #18

def load_graph(frozen_graph_filename=GRAPH_PB_PATH):
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    return graph_def


if __name__ == '__main__':
    # Initialize Globals.
    create_flags()
    initialize_globals()

    # Load pretrained model.
    graph_def = load_graph()
    with tf.Graph().as_default() as graph:
        # The name var will prefix every op/nodes in your graph
        # Since we load everything in a new graph, this is not needed
        tf.import_graph_def(graph_def, name="prefix")

        # Open tf.Session.
        with tf.Session(graph=graph) as sess:

            # Extract graph node names.
            tf.import_graph_def(graph_def, name='')
            graph_nodes = [n for n in graph_def.node]
            names = []
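            # Continuation sketch: the example is truncated above. Collecting the
            # op names of the imported graph is one plausible next step, e.g. to
            # locate input/output tensors for later inference.
            for node in graph_nodes:
                names.append(node.name)
            print('\n'.join(names))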
Example #19
def main(_):
    initialize_globals()

    if FLAGS.train or FLAGS.test:
        if len(FLAGS.worker_hosts) == 0:
            # Only one local task: this process (default case - no cluster)
            #with tf.Graph().as_default():
            #train()
            #if Config.is_chief:
            #    export()
            # Now do a final test epoch
            if FLAGS.test:
                print("$$$$$$$$$ Testing on entire test dataset $$$$$$$$$$")
                ckpt_files = [
                    f for f in sorted(os.listdir(FLAGS.checkpoint_dir))
                    if os.path.isfile(os.path.join(FLAGS.checkpoint_dir, f))
                    and '.meta' in f
                ]
                for ckpt_file in ckpt_files:
                    print("************* Testing on ckpt file: " + ckpt_file +
                          "   ***************")
                    with tf.Graph().as_default():
                        test(ckpt_file.replace(".meta", ""), FLAGS.test_files)
                    log_debug('Done.')
                for test_file in FLAGS.test_files.split(","):
                    print("$$$$$$$$$ Testing on " + test_file +
                          " dataset $$$$$$$$$$")
                    ckpt_files = [
                        f for f in sorted(os.listdir(FLAGS.checkpoint_dir))
                        if os.path.isfile(os.path.join(FLAGS.checkpoint_dir,
                                                       f)) and '.meta' in f
                    ]
                    for ckpt_file in ckpt_files:
                        print("************* Testing on ckpt file: " +
                              ckpt_file + "   ***************")
                        with tf.Graph().as_default():
                            test(ckpt_file.replace(".meta", ""), test_file)
                        log_debug('Done.')

        else:
            # Create and start a server for the local task.
            server = tf.train.Server(Config.cluster,
                                     job_name=FLAGS.job_name,
                                     task_index=FLAGS.task_index)
            if FLAGS.job_name == 'ps':
                # We are a parameter server and therefore we just wait for all workers to finish
                # by waiting for their stop tokens.
                with tf.Session(server.target) as session:
                    for worker in FLAGS.worker_hosts:
                        log_debug('Waiting for stop token...')
                        token = session.run(
                            Config.done_dequeues[FLAGS.task_index])
                        if token < 0:
                            log_debug(
                                'Got a kill switch token from worker %i.' %
                                abs(token + 1))
                            break
                        log_debug('Got a stop token from worker %i.' % token)
                log_debug('Session closed.')

                if FLAGS.test:
                    test()
            elif FLAGS.job_name == 'worker':
                # We are a worker and therefore we have to do some work.
                # Assigns ops to the local worker by default.
                with tf.device(
                        tf.train.replica_device_setter(
                            worker_device=Config.worker_device,
                            cluster=Config.cluster)):

                    # Do the training
                    train(server)

            log_debug('Server stopped.')

    # Are we the main process?
    #if Config.is_chief:
    # Doing solo/post-processing work just on the main process...
    # Exporting the model
    #if FLAGS.export_dir:
    #export()

    if len(FLAGS.one_shot_infer):
        do_single_file_inference(FLAGS.one_shot_infer)