def run(args):
    config = UserConfig(DEFAULT_YAML, args.config, learning=True)
    speech_featurizer = TFSpeechFeaturizer(config["speech_config"])
    text_featurizer = TextFeaturizer(config["decoder_config"])

    tf.random.set_seed(0)
    assert args.saved_model

    if args.tfrecords:
        test_dataset = ASRTFRecordDataset(
            config["learning_config"]["dataset_config"]["test_paths"],
            config["learning_config"]["dataset_config"]["tfrecords_dir"],
            speech_featurizer, text_featurizer, "test",
            augmentations=config["learning_config"]["augmentations"],
            shuffle=False
        ).create(config["learning_config"]["running_config"]["batch_size"])
    else:
        test_dataset = ASRSliceDataset(
            stage="test",
            speech_featurizer=speech_featurizer,
            text_featurizer=text_featurizer,
            data_paths=config["learning_config"]["dataset_config"]["eval_paths"],
            shuffle=False
        ).create(config["learning_config"]["running_config"]["batch_size"])

    # build model
    f, c = speech_featurizer.compute_feature_dim()
    conformer = Conformer(vocabulary_size=text_featurizer.num_classes,
                          **config["model_config"])
    conformer._build([1, 50, f, c])
    conformer.summary(line_length=100)

    conformer_tester = BaseTester(
        config=config["learning_config"]["running_config"],
        saved_path=args.saved_model,
        from_weights=args.from_weights)
    conformer_tester.compile(conformer, speech_featurizer, text_featurizer)
    conformer_tester.run(test_dataset)
def main(argv):
    speech_file = argv[1]
    feature_type = argv[2]
    speech_conf = {
        "sample_rate": 16000,
        "frame_ms": 25,
        "stride_ms": 10,
        "feature_type": feature_type,
        "preemphasis": 0.97,
        "normalize_signal": True,
        "normalize_feature": True,
        "normalize_per_feature": False,
        "num_feature_bins": 80,
    }
    signal = read_raw_audio(speech_file, speech_conf["sample_rate"])
    sf = TFSpeechFeaturizer(speech_conf)
    ft = sf.extract(signal)[:, :, 0]

    plt.figure(figsize=(15, 5))
    plt.imshow(ft.T, origin="lower")
    plt.colorbar()
    plt.tight_layout()
    plt.show()
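# Example invocation of the demo above (the script name and audio path are
# hypothetical; the script reads the audio file and feature type from the
# command line):
#
#   python plot_speech_features.py /path/to/audio.wav log_mel_spectrogram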
parser.add_argument("--saved", type=str, default=None, help="Path to saved model") parser.add_argument("output", type=str, default=None, help="TFLite file path to be exported") args = parser.parse_args() assert args.saved and args.output config = UserConfig(DEFAULT_YAML, args.config, learning=True) speech_featurizer = TFSpeechFeaturizer(config["speech_config"]) text_featurizer = CharFeaturizer(config["decoder_config"]) # build model conformer = Conformer(**config["model_config"], vocabulary_size=text_featurizer.num_classes) conformer._build(speech_featurizer.shape) conformer.load_weights(args.saved) conformer.summary(line_length=150) conformer.add_featurizers(speech_featurizer, text_featurizer) concrete_func = conformer.make_tflite_function( greedy=True).get_concrete_function() converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func]) converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.target_spec.supported_ops = [
data = "/mnt/Data/ML/ASR/Raw/LibriSpeech/train-clean-100/transcripts.tsv" text_featurizer = CharFeaturizer({ "vocabulary": None, "blank_at_zero": True, "beam_width": 5, "norm_score": True }) speech_featurizer = TFSpeechFeaturizer({ "sample_rate": 16000, "frame_ms": 25, "stride_ms": 10, "num_feature_bins": 80, "feature_type": "log_mel_spectrogram", "preemphasis": 0.97, "normalize_signal": True, "normalize_feature": True, "normalize_per_feature": False }) dataset = ASRSliceDataset(stage="train", speech_featurizer=speech_featurizer, text_featurizer=text_featurizer, data_paths=[data], augmentations=augments, shuffle=True).create(4).take(100) while True: print("--------------------------------------------")
from tiramisu_asr.optimizers.schedules import TransformerSchedule

from multiconformers_trainer import MultiConformersTrainer
from multiconformers_dataset import MultiConformersTFRecordDataset, MultiConformersSliceDataset

config = UserConfig(DEFAULT_YAML, args.config, learning=True)

# copy the shared speech config so the two feature types do not overwrite each other
lms_config = dict(config["speech_config"])
lms_config["feature_type"] = "log_mel_spectrogram"
lgs_config = dict(config["speech_config"])
lgs_config["feature_type"] = "log_gammatone_spectrogram"

if args.nfx:
    speech_featurizer_lms = NumpySpeechFeaturizer(lms_config)
    speech_featurizer_lgs = NumpySpeechFeaturizer(lgs_config)
else:
    speech_featurizer_lms = TFSpeechFeaturizer(lms_config)
    speech_featurizer_lgs = TFSpeechFeaturizer(lgs_config)

if args.subwords_prefix and os.path.exists(f"{args.subwords_prefix}.subwords"):
    print("Loading subwords ...")
    text_featurizer = SubwordFeaturizer.load_from_file(
        config["decoder_config"], args.subwords_prefix)
else:
    print("Generating subwords ...")
    text_featurizer = SubwordFeaturizer.build_from_corpus(
        config["decoder_config"], corpus_files=args.subwords_corpus)
    text_featurizer.subwords.save_to_file(args.subwords_prefix)

if args.tfrecords:
    train_dataset = MultiConformersTFRecordDataset(
        data_paths=config["learning_config"]["dataset_config"]["train_paths"],
def run(args):
    config = UserConfig(DEFAULT_YAML, args.config, learning=True)
    speech_featurizer = TFSpeechFeaturizer(config["speech_config"])
    text_featurizer = TextFeaturizer(config["decoder_config"])

    tf.random.set_seed(2020)

    if args.mixed_precision:
        policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16")
        tf.keras.mixed_precision.experimental.set_policy(policy)
        print("Enabled mixed precision training")

    if args.tfrecords:
        train_dataset = ASRTFRecordDataset(
            config["learning_config"]["dataset_config"]["train_paths"],
            config["learning_config"]["dataset_config"]["tfrecords_dir"],
            speech_featurizer, text_featurizer, "train",
            augmentations=config["learning_config"]["augmentations"],
            shuffle=True,
        )
        eval_dataset = ASRTFRecordDataset(
            config["learning_config"]["dataset_config"]["eval_paths"],
            config["learning_config"]["dataset_config"]["tfrecords_dir"],
            speech_featurizer, text_featurizer, "eval",
            shuffle=False)
    else:
        train_dataset = ASRSliceDataset(
            stage="train",
            speech_featurizer=speech_featurizer,
            text_featurizer=text_featurizer,
            data_paths=config["learning_config"]["dataset_config"]["train_paths"],
            augmentations=config["learning_config"]["augmentations"],
            shuffle=True,
        )
        eval_dataset = ASRSliceDataset(
            stage="eval",
            speech_featurizer=speech_featurizer,
            text_featurizer=text_featurizer,
            data_paths=config["learning_config"]["dataset_config"]["eval_paths"],
            shuffle=False)

    conformer_trainer = TransducerTrainer(
        config=config["learning_config"]["running_config"],
        text_featurizer=text_featurizer,
        is_mixed_precision=args.mixed_precision)

    with conformer_trainer.strategy.scope():
        # build model
        f, c = speech_featurizer.compute_feature_dim()
        conformer = Conformer(**config["model_config"],
                              vocabulary_size=text_featurizer.num_classes)
        conformer._build([1, 50, f, c])

        optimizer_config = config["learning_config"]["optimizer_config"]
        optimizer = tf.keras.optimizers.Adam(
            TransformerSchedule(
                d_model=config["model_config"]["dmodel"],
                warmup_steps=optimizer_config["warmup_steps"],
                max_lr=(0.05 / math.sqrt(config["model_config"]["dmodel"]))),
            beta_1=float(optimizer_config["beta1"]),
            beta_2=float(optimizer_config["beta2"]),
            epsilon=float(optimizer_config["epsilon"]))

    conformer_trainer.compile(model=conformer, optimizer=optimizer,
                              max_to_keep=args.max_ckpts)

    conformer_trainer.fit(train_dataset, eval_dataset, args.eval_train_ratio)

    if args.export:
        if args.from_weights:
            conformer_trainer.model.save_weights(args.export)
        else:
            conformer_trainer.model.save(args.export)
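# The optimizer above wraps TransformerSchedule from
# tiramisu_asr.optimizers.schedules; below is a minimal sketch, assuming it
# follows the standard Transformer learning-rate schedule (Vaswani et al.,
# 2017) with an optional max_lr cap. The library's real implementation may
# differ in details.
class TransformerScheduleSketch(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, d_model, warmup_steps=4000, max_lr=None):
        super().__init__()
        self.d_model = tf.cast(d_model, tf.float32)
        self.warmup_steps = warmup_steps
        self.max_lr = max_lr

    def __call__(self, step):
        step = tf.cast(step, tf.float32)
        # lr = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)
        lr = tf.math.rsqrt(self.d_model) * tf.minimum(
            tf.math.rsqrt(step), step * (self.warmup_steps ** -1.5))
        if self.max_lr is not None:
            lr = tf.minimum(lr, self.max_lr)
        return lr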
"lm_config": { "model_path": "/mnt/Data/ML/NLP/vntc_asrtrain_5gram_trie.binary", "alpha": 2.0, "beta": 2.0 } } text_featurizer = TextFeaturizer(decoder_config) text_featurizer.add_scorer(Scorer(**decoder_config["lm_config"], vocabulary=text_featurizer.vocab_array)) speech_featurizer = TFSpeechFeaturizer({ "sample_rate": 16000, "frame_ms": 25, "stride_ms": 10, "num_feature_bins": 80, "feature_type": "logfbank", "preemphasis": 0.97, # "delta": True, # "delta_delta": True, "normalize_signal": True, "normalize_feature": True, "normalize_per_feature": False, # "pitch": False, }) inp = tf.keras.Input(shape=[None, 80, 3]) class BaseModel(tf.keras.Model): def __init__(self, name="basemodel", **kwargs): super().__init__(name=name, **kwargs) self.dense = tf.keras.layers.Dense(350) self.time_reduction_factor = 1
def main():
    tf.keras.backend.clear_session()

    parser = argparse.ArgumentParser(prog="Deep Speech 2 Tester")

    parser.add_argument("--config", "-c", type=str, default=DEFAULT_YAML,
                        help="The file path of model configuration file")
    parser.add_argument("--saved_path", "-e", type=str, default=None,
                        help="Path to the saved model file or weights")
    parser.add_argument("--from_weights", type=bool, default=False,
                        help="Whether to save or load only weights")
    parser.add_argument("--tfrecords", type=bool, default=False,
                        help="Whether to use tfrecords dataset")
    parser.add_argument("--batch_size", type=int, default=1,
                        help="Batch size for testing")

    args = parser.parse_args()

    tf.random.set_seed(0)
    assert args.saved_path

    config = UserConfig(DEFAULT_YAML, args.config, learning=True)
    speech_featurizer = TFSpeechFeaturizer(config["speech_config"])
    text_featurizer = TextFeaturizer(config["decoder_config"])

    # Build DS2 model
    f, c = speech_featurizer.compute_feature_dim()
    ds2_model = DeepSpeech2(input_shape=[None, f, c],
                            arch_config=config["model_config"],
                            num_classes=text_featurizer.num_classes,
                            name="deepspeech2")
    ds2_model._build([1, 50, f, c])
    ds2_model.summary(line_length=100)

    if args.tfrecords:
        test_dataset = ASRTFRecordDataset(
            config["learning_config"]["dataset_config"]["test_paths"],
            config["learning_config"]["dataset_config"]["tfrecords_dir"],
            speech_featurizer, text_featurizer, "test",
            augmentations=config["learning_config"]["augmentations"],
            shuffle=False).create(args.batch_size)
    else:
        test_dataset = ASRSliceDataset(
            stage="test",
            speech_featurizer=speech_featurizer,
            text_featurizer=text_featurizer,
            data_paths=config["learning_config"]["dataset_config"]["eval_paths"],
            shuffle=False).create(args.batch_size)

    ctc_tester = BaseTester(
        config=config["learning_config"]["running_config"],
        saved_path=args.saved_path,
        from_weights=args.from_weights)
    ctc_tester.compile(ds2_model, speech_featurizer, text_featurizer)
    ctc_tester.run(test_dataset)
def main():
    tf.keras.backend.clear_session()

    parser = argparse.ArgumentParser(prog="Deep Speech 2 Training")

    parser.add_argument("--config", "-c", type=str, default=DEFAULT_YAML,
                        help="The file path of model configuration file")
    parser.add_argument("--export", "-e", type=str, default=None,
                        help="Path to the model file to be exported")
    parser.add_argument("--mixed_precision", type=bool, default=False,
                        help="Whether to use mixed precision training")
    parser.add_argument("--save_weights", type=bool, default=False,
                        help="Whether to save or load only weights")
    parser.add_argument("--max_ckpts", type=int, default=10,
                        help="Max number of checkpoints to keep")
    parser.add_argument("--eval_train_ratio", type=int, default=1,
                        help="Ratio between train batch size and eval batch size")
    parser.add_argument("--tfrecords", type=bool, default=False,
                        help="Whether to use tfrecords dataset")

    args = parser.parse_args()

    config = UserConfig(DEFAULT_YAML, args.config, learning=True)
    speech_featurizer = TFSpeechFeaturizer(config["speech_config"])
    text_featurizer = TextFeaturizer(config["decoder_config"])

    tf.random.set_seed(2020)

    if args.mixed_precision:
        policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16")
        tf.keras.mixed_precision.experimental.set_policy(policy)
        print("Enabled mixed precision training")

    if args.tfrecords:
        train_dataset = ASRTFRecordDataset(
            config["learning_config"]["dataset_config"]["train_paths"],
            config["learning_config"]["dataset_config"]["tfrecords_dir"],
            speech_featurizer, text_featurizer, "train",
            augmentations=config["learning_config"]["augmentations"],
            shuffle=True,
        )
        eval_dataset = ASRTFRecordDataset(
            config["learning_config"]["dataset_config"]["eval_paths"],
            config["learning_config"]["dataset_config"]["tfrecords_dir"],
            speech_featurizer, text_featurizer, "eval",
            shuffle=False)
    else:
        train_dataset = ASRSliceDataset(
            stage="train",
            speech_featurizer=speech_featurizer,
            text_featurizer=text_featurizer,
            data_paths=config["learning_config"]["dataset_config"]["train_paths"],
            augmentations=config["learning_config"]["augmentations"],
            shuffle=True)
        eval_dataset = ASRSliceDataset(
            stage="eval",
            speech_featurizer=speech_featurizer,
            text_featurizer=text_featurizer,
            data_paths=config["learning_config"]["dataset_config"]["eval_paths"],
            shuffle=False)

    ctc_trainer = CTCTrainer(speech_featurizer, text_featurizer,
                             config["learning_config"]["running_config"],
                             args.mixed_precision)

    # Build DS2 model
    f, c = speech_featurizer.compute_feature_dim()
    with ctc_trainer.strategy.scope():
        ds2_model = DeepSpeech2(input_shape=[None, f, c],
                                arch_config=config["model_config"],
                                num_classes=text_featurizer.num_classes,
                                name="deepspeech2")
        ds2_model._build([1, 50, f, c])

    # Compile
    ctc_trainer.compile(ds2_model, config["learning_config"]["optimizer_config"],
                        max_to_keep=args.max_ckpts)

    ctc_trainer.fit(train_dataset, eval_dataset, args.eval_train_ratio)

    if args.export:
        if args.save_weights:
            ctc_trainer.model.save_weights(args.export)
        else:
            ctc_trainer.model.save(args.export)