        return samples

    samples = []
    for csv, init_op in zip(test_csvs, test_init_ops):
        print('Testing model on {}'.format(csv))
        samples.extend(run_test(init_op, dataset=csv))
    return samples


def main(_):
    initialize_globals()

    if not FLAGS.test_files:
        log_error('You need to specify what files to use for evaluation via '
                  'the --test_files flag.')
        exit(1)

    from DeepSpeech import create_model, try_loading  # pylint: disable=cyclic-import
    samples = evaluate(FLAGS.test_files.split(','), create_model, try_loading)

    if FLAGS.test_output_file:
        # Save decoded tuples as JSON, converting NumPy floats to Python floats.
        # Use a context manager so the file is flushed and closed deterministically.
        with open(FLAGS.test_output_file, 'w') as fout:
            json.dump(samples, fout, default=float)


if __name__ == '__main__':
    create_flags()
    tf.app.flags.DEFINE_string('test_output_file', '',
                               'path to a file to save all src/decoded/distance/loss tuples')
    tf.app.run(main)
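# Usage sketch, assuming this module is saved as evaluate.py. The flag names
# come from the code above; the CSV and JSON paths are placeholder examples.
# --test_files takes a comma-separated list, matching the split(',') above:
#
#   python evaluate.py --test_files test1.csv,test2.csv \
#                      --test_output_file /tmp/test_output.json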
# Tail of the trainer's main() (the DeepSpeech module imported above).
def main(_):
    # ... earlier main() body elided ...

    if FLAGS.test:
        test()
    elif FLAGS.job_name == 'worker':
        # We are a worker and therefore we have to do some work.
        # Assigns ops to the local worker by default.
        with tf.device(tf.train.replica_device_setter(
                worker_device=Config.worker_device,
                cluster=Config.cluster)):
            # Do the training
            train(server)

        log_debug('Server stopped.')

    # Are we the main process?
    if Config.is_chief:
        # Doing solo/post-processing work just on the main process...
        # Exporting the model
        if FLAGS.export_dir:
            export()

    if FLAGS.one_shot_infer:
        do_single_file_inference(FLAGS.one_shot_infer)


if __name__ == '__main__':
    create_flags()
    tf.app.run(main)
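# Usage sketches for this entry point. The file name DeepSpeech.py, the audio
# path, and --task_index are assumptions; the other flags appear in the code above:
#
#   # Decode a single audio file with a trained model:
#   python DeepSpeech.py --one_shot_infer audio/sample.wav
#
#   # Launch one distributed worker (cluster/host flags not shown):
#   python DeepSpeech.py --job_name worker --task_index 0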