def run(args):
    assert args.mode in modes, f"Mode must be in {modes}"
    transcripts = preprocess_paths(args.transcripts)
    tfrecords_dir = preprocess_paths(args.tfrecords_dir)

    if args.mode == "train":
        ASRTFRecordDataset(transcripts, tfrecords_dir, None, None,
                           args.mode, shuffle=True).create_tfrecords()
    else:
        ASRTFRecordDataset(transcripts, tfrecords_dir, None, None,
                           args.mode, shuffle=False).create_tfrecords()
def run(args):
    config = UserConfig(DEFAULT_YAML, args.config, learning=True)
    speech_featurizer = TFSpeechFeaturizer(config["speech_config"])
    text_featurizer = TextFeaturizer(config["decoder_config"])
    tf.random.set_seed(0)
    assert args.saved_model

    if args.tfrecords:
        test_dataset = ASRTFRecordDataset(
            config["learning_config"]["dataset_config"]["test_paths"],
            config["learning_config"]["dataset_config"]["tfrecords_dir"],
            speech_featurizer, text_featurizer, "test",
            augmentations=config["learning_config"]["augmentations"], shuffle=False
        ).create(config["learning_config"]["running_config"]["batch_size"])
    else:
        test_dataset = ASRSliceDataset(
            stage="test",
            speech_featurizer=speech_featurizer,
            text_featurizer=text_featurizer,
            data_paths=config["learning_config"]["dataset_config"]["test_paths"],
            shuffle=False
        ).create(config["learning_config"]["running_config"]["batch_size"])

    # Build model
    f, c = speech_featurizer.compute_feature_dim()
    conformer = Conformer(vocabulary_size=text_featurizer.num_classes,
                          **config["model_config"])
    conformer._build([1, 50, f, c])
    conformer.summary(line_length=100)

    conformer_tester = BaseTester(
        config=config["learning_config"]["running_config"],
        saved_path=args.saved_model,
        from_weights=args.from_weights)
    conformer_tester.compile(conformer, speech_featurizer, text_featurizer)
    conformer_tester.run(test_dataset)
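# A minimal CLI wrapper sketch (not part of the original file). The flag names
# below are assumptions inferred from the attributes run() reads
# (args.config, args.saved_model, args.from_weights, args.tfrecords).
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(prog="Conformer Tester")
    parser.add_argument("--config", "-c", type=str, default=DEFAULT_YAML,
                        help="Path of the model configuration file")
    parser.add_argument("--saved_model", type=str, default=None,
                        help="Path to the saved model (or weights) to test")
    parser.add_argument("--from_weights", type=bool, default=False,
                        help="Whether the saved file contains only weights")
    parser.add_argument("--tfrecords", type=bool, default=False,
                        help="Whether to read the test set from tfrecords")
    run(parser.parse_args())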
if args.subwords_prefix and os.path.exists(f"{args.subwords_prefix}.subwords"):
    print("Loading subwords ...")
    text_featurizer = SubwordFeaturizer.load_from_file(
        config["decoder_config"], args.subwords_prefix)
else:
    raise ValueError("subwords_prefix must be set")

tf.random.set_seed(0)
assert args.saved

if args.tfrecords:
    test_dataset = ASRTFRecordDataset(
        data_paths=config["learning_config"]["dataset_config"]["test_paths"],
        tfrecords_dir=config["learning_config"]["dataset_config"]["tfrecords_dir"],
        speech_featurizer=speech_featurizer,
        text_featurizer=text_featurizer,
        stage="test",
        shuffle=False)
else:
    test_dataset = ASRSliceDataset(
        data_paths=config["learning_config"]["dataset_config"]["test_paths"],
        speech_featurizer=speech_featurizer,
        text_featurizer=text_featurizer,
        stage="test",
        shuffle=False)

# Build model
conformer = Conformer(vocabulary_size=text_featurizer.num_classes,
                      **config["model_config"])
conformer._build(speech_featurizer.shape)
from tiramisu_asr.datasets.asr_dataset import ASRTFRecordDataset, ASRSliceDataset
from tiramisu_asr.featurizers.speech_featurizers import TFSpeechFeaturizer
from tiramisu_asr.featurizers.text_featurizers import TextFeaturizer
from tiramisu_asr.runners.ctc_runners import CTCTrainer
from model import DeepSpeech2

config = UserConfig(DEFAULT_YAML, args.config, learning=True)
speech_featurizer = TFSpeechFeaturizer(config["speech_config"])
text_featurizer = TextFeaturizer(config["decoder_config"])

if args.tfrecords:
    train_dataset = ASRTFRecordDataset(
        data_paths=config["learning_config"]["dataset_config"]["train_paths"],
        tfrecords_dir=config["learning_config"]["dataset_config"]["tfrecords_dir"],
        speech_featurizer=speech_featurizer,
        text_featurizer=text_featurizer,
        augmentations=config["learning_config"]["augmentations"],
        stage="train", cache=args.cache, shuffle=True)
    eval_dataset = ASRTFRecordDataset(
        data_paths=config["learning_config"]["dataset_config"]["eval_paths"],
        tfrecords_dir=config["learning_config"]["dataset_config"]["tfrecords_dir"],
        speech_featurizer=speech_featurizer,
        text_featurizer=text_featurizer,
        stage="eval", cache=args.cache, shuffle=True)
else:
    train_dataset = ASRSliceDataset(
def run(args):
    config = UserConfig(DEFAULT_YAML, args.config, learning=True)
    speech_featurizer = TFSpeechFeaturizer(config["speech_config"])
    text_featurizer = TextFeaturizer(config["decoder_config"])
    tf.random.set_seed(2020)

    if args.mixed_precision:
        policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16")
        tf.keras.mixed_precision.experimental.set_policy(policy)
        print("Enabled mixed precision training")

    if args.tfrecords:
        train_dataset = ASRTFRecordDataset(
            config["learning_config"]["dataset_config"]["train_paths"],
            config["learning_config"]["dataset_config"]["tfrecords_dir"],
            speech_featurizer, text_featurizer, "train",
            augmentations=config["learning_config"]["augmentations"], shuffle=True)
        eval_dataset = ASRTFRecordDataset(
            config["learning_config"]["dataset_config"]["eval_paths"],
            config["learning_config"]["dataset_config"]["tfrecords_dir"],
            speech_featurizer, text_featurizer, "eval", shuffle=False)
    else:
        train_dataset = ASRSliceDataset(
            stage="train",
            speech_featurizer=speech_featurizer,
            text_featurizer=text_featurizer,
            data_paths=config["learning_config"]["dataset_config"]["train_paths"],
            augmentations=config["learning_config"]["augmentations"], shuffle=True)
        eval_dataset = ASRSliceDataset(
            stage="eval",
            speech_featurizer=speech_featurizer,
            text_featurizer=text_featurizer,
            data_paths=config["learning_config"]["dataset_config"]["eval_paths"],
            shuffle=False)

    conformer_trainer = TransducerTrainer(
        config=config["learning_config"]["running_config"],
        text_featurizer=text_featurizer,
        is_mixed_precision=args.mixed_precision)

    with conformer_trainer.strategy.scope():
        # Build model
        f, c = speech_featurizer.compute_feature_dim()
        conformer = Conformer(**config["model_config"],
                              vocabulary_size=text_featurizer.num_classes)
        conformer._build([1, 50, f, c])

        optimizer_config = config["learning_config"]["optimizer_config"]
        optimizer = tf.keras.optimizers.Adam(
            TransformerSchedule(
                d_model=config["model_config"]["dmodel"],
                warmup_steps=optimizer_config["warmup_steps"],
                max_lr=(0.05 / math.sqrt(config["model_config"]["dmodel"]))),
            beta_1=float(optimizer_config["beta1"]),
            beta_2=float(optimizer_config["beta2"]),
            epsilon=float(optimizer_config["epsilon"]))

    conformer_trainer.compile(model=conformer, optimizer=optimizer,
                              max_to_keep=args.max_ckpts)
    conformer_trainer.fit(train_dataset, eval_dataset, args.eval_train_ratio)

    if args.export:
        if args.from_weights:
            conformer_trainer.model.save_weights(args.export)
        else:
            conformer_trainer.model.save(args.export)
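# For illustration only: a sketch of the Transformer-style warmup schedule that
# TransformerSchedule above is assumed to implement (the real class lives in
# the library; the cap-by-max_lr behaviour here is an assumption).
class TransformerScheduleSketch(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, d_model, warmup_steps=4000, max_lr=None):
        super().__init__()
        self.d_model = tf.cast(d_model, tf.float32)
        self.warmup_steps = warmup_steps
        self.max_lr = max_lr

    def __call__(self, step):
        # lr(step) = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)
        step = tf.cast(step, tf.float32)
        lr = tf.math.rsqrt(self.d_model) * tf.minimum(
            tf.math.rsqrt(step), step * (self.warmup_steps ** -1.5))
        # Optionally clip the schedule at max_lr (assumed behaviour).
        return tf.minimum(lr, self.max_lr) if self.max_lr is not None else lr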
def run(args):
    assert args.mode in modes, f"Mode must be in {modes}"
    config = UserConfig(DEFAULT_YAML, args.config, learning=True)
    speech_featurizer = SpeechFeaturizer(config["speech_config"])
    text_featurizer = TextFeaturizer(config["decoder_config"])

    if args.mode == "train":
        tf.random.set_seed(2020)
        if args.mixed_precision:
            policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16")
            tf.keras.mixed_precision.experimental.set_policy(policy)
            print("Enabled mixed precision training")

        ctc_trainer = CTCTrainer(speech_featurizer, text_featurizer,
                                 config["learning_config"]["running_config"],
                                 args.mixed_precision)

        if args.tfrecords:
            train_dataset = ASRTFRecordDataset(
                config["learning_config"]["dataset_config"]["train_paths"],
                config["learning_config"]["dataset_config"]["tfrecords_dir"],
                speech_featurizer, text_featurizer, "train",
                augmentations=config["learning_config"]["augmentations"], shuffle=True)
            eval_dataset = ASRTFRecordDataset(
                config["learning_config"]["dataset_config"]["eval_paths"],
                config["learning_config"]["dataset_config"]["tfrecords_dir"],
                speech_featurizer, text_featurizer, "eval", shuffle=False)
        else:
            train_dataset = ASRSliceDataset(
                stage="train",
                speech_featurizer=speech_featurizer,
                text_featurizer=text_featurizer,
                data_paths=config["learning_config"]["dataset_config"]["train_paths"],
                augmentations=config["learning_config"]["augmentations"], shuffle=True)
            eval_dataset = ASRSliceDataset(
                stage="eval",
                speech_featurizer=speech_featurizer,
                text_featurizer=text_featurizer,
                data_paths=config["learning_config"]["dataset_config"]["eval_paths"],
                shuffle=False)

        # Build the self-attention DS2 model
        f, c = speech_featurizer.compute_feature_dim()
        with ctc_trainer.strategy.scope():
            satt_ds2_model = SelfAttentionDS2(input_shape=[None, f, c],
                                              arch_config=config["model_config"],
                                              num_classes=text_featurizer.num_classes)
            satt_ds2_model._build([1, 50, f, c])

        optimizer = create_optimizer(
            name=config["learning_config"]["optimizer_config"]["name"],
            d_model=config["model_config"]["att"]["head_size"],
            **config["learning_config"]["optimizer_config"]["config"])

        # Compile and train
        ctc_trainer.compile(satt_ds2_model, optimizer, max_to_keep=args.max_ckpts)
        ctc_trainer.fit(train_dataset, eval_dataset, args.eval_train_ratio)

        if args.export:
            if args.from_weights:
                ctc_trainer.model.save_weights(args.export)
            else:
                ctc_trainer.model.save(args.export)

    elif args.mode == "test":
        tf.random.set_seed(0)
        assert args.export

        text_featurizer.add_scorer(
            Scorer(**text_featurizer.decoder_config["lm_config"],
                   vocabulary=text_featurizer.vocab_array))

        # Build the self-attention DS2 model
        f, c = speech_featurizer.compute_feature_dim()
        satt_ds2_model = SelfAttentionDS2(input_shape=[None, f, c],
                                          arch_config=config["model_config"],
                                          num_classes=text_featurizer.num_classes)
        satt_ds2_model._build([1, 50, f, c])
        satt_ds2_model.summary(line_length=100)

        optimizer = create_optimizer(
            name=config["learning_config"]["optimizer_config"]["name"],
            d_model=config["model_config"]["att"]["head_size"],
            **config["learning_config"]["optimizer_config"]["config"])

        batch_size = config["learning_config"]["running_config"]["batch_size"]
        if args.tfrecords:
            test_dataset = ASRTFRecordDataset(
                config["learning_config"]["dataset_config"]["test_paths"],
                config["learning_config"]["dataset_config"]["tfrecords_dir"],
                speech_featurizer, text_featurizer, "test",
                augmentations=config["learning_config"]["augmentations"], shuffle=False
            ).create(batch_size * args.eval_train_ratio)
        else:
            test_dataset = ASRSliceDataset(
                stage="test",
                speech_featurizer=speech_featurizer,
                text_featurizer=text_featurizer,
                data_paths=config["learning_config"]["dataset_config"]["test_paths"],
                augmentations=config["learning_config"]["augmentations"], shuffle=False
            ).create(batch_size * args.eval_train_ratio)

        ctc_tester = BaseTester(
            config=config["learning_config"]["running_config"],
            saved_path=args.export,
            from_weights=args.from_weights)
        ctc_tester.compile(satt_ds2_model, speech_featurizer, text_featurizer)
        ctc_tester.run(test_dataset)

    else:
        assert args.export

        # Build the self-attention DS2 model
        f, c = speech_featurizer.compute_feature_dim()
        satt_ds2_model = SelfAttentionDS2(input_shape=[None, f, c],
                                          arch_config=config["model_config"],
                                          num_classes=text_featurizer.num_classes)
        satt_ds2_model._build([1, 50, f, c])

        optimizer = create_optimizer(
            name=config["learning_config"]["optimizer_config"]["name"],
            d_model=config["model_config"]["att"]["head_size"],
            **config["learning_config"]["optimizer_config"]["config"])

        def save_func(**kwargs):
            if args.from_weights:
                kwargs["model"].save_weights(args.export)
            else:
                kwargs["model"].save(args.export)

        save_from_checkpoint(func=save_func,
                             outdir=config["learning_config"]["running_config"]["outdir"],
                             model=satt_ds2_model,
                             optimizer=optimizer)
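# Assumption about the calling convention: save_from_checkpoint is expected to
# restore the latest checkpoint found under `outdir` into the given model and
# optimizer and then invoke `func` with them as keyword arguments, which is why
# save_func above reads kwargs["model"]. Check the library source to confirm.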
def main(): tf.keras.backend.clear_session() parser = argparse.ArgumentParser(prog="Deep Speech 2 Tester") parser.add_argument("--config", "-c", type=str, default=DEFAULT_YAML, help="The file path of model configuration file") parser.add_argument("--saved_path", "-e", type=str, default=None, help="Path to the model file to be exported") parser.add_argument("--from_weights", type=bool, default=False, help="Whether to save or load only weights") parser.add_argument("--tfrecords", type=bool, default=False, help="Whether to use tfrecords dataset") parser.add_argument("--batch_size", type=int, default=1, help="Batch size for testing") args = parser.parse_args() tf.random.set_seed(0) assert args.export config = UserConfig(DEFAULT_YAML, args.config, learning=True) speech_featurizer = TFSpeechFeaturizer(config["speech_config"]) text_featurizer = TextFeaturizer(config["decoder_config"]) # Build DS2 model f, c = speech_featurizer.compute_feature_dim() ds2_model = DeepSpeech2(input_shape=[None, f, c], arch_config=config["model_config"], num_classes=text_featurizer.num_classes, name="deepspeech2") ds2_model._build([1, 50, f, c]) ds2_model.summary(line_length=100) if args.tfrecords: test_dataset = ASRTFRecordDataset( config["learning_config"]["dataset_config"]["test_paths"], config["learning_config"]["dataset_config"]["tfrecords_dir"], speech_featurizer, text_featurizer, "test", augmentations=config["learning_config"]["augmentations"], shuffle=False).create(args.batch_size) else: test_dataset = ASRSliceDataset(stage="test", speech_featurizer=speech_featurizer, text_featurizer=text_featurizer, data_paths=config["learning_config"] ["dataset_config"]["eval_paths"], shuffle=False).create(args.batch_size) ctc_tester = BaseTester(config=config["learning_config"]["running_config"], saved_path=args.saved_path, from_weights=args.from_weights) ctc_tester.compile(ds2_model, speech_featurizer, text_featurizer) ctc_tester.run(test_dataset)
default=None, help="Directory to tfrecords") parser.add_argument("transcripts", nargs="+", type=str, default=None, help="Paths to transcript files") args = parser.parse_args() assert args.mode in modes, f"Mode must in {modes}" transcripts = preprocess_paths(args.transcripts) tfrecords_dir = preprocess_paths(args.tfrecords_dir) if args.mode == "train": ASRTFRecordDataset(transcripts, tfrecords_dir, None, None, args.mode, shuffle=True).create_tfrecords() else: ASRTFRecordDataset(transcripts, tfrecords_dir, None, None, args.mode, shuffle=False).create_tfrecords()
def main(): tf.keras.backend.clear_session() parser = argparse.ArgumentParser(prog="Deep Speech 2 Training") parser.add_argument("--config", "-c", type=str, default=DEFAULT_YAML, help="The file path of model configuration file") parser.add_argument("--export", "-e", type=str, default=None, help="Path to the model file to be exported") parser.add_argument("--mixed_precision", type=bool, default=False, help="Whether to use mixed precision training") parser.add_argument("--save_weights", type=bool, default=False, help="Whether to save or load only weights") parser.add_argument("--max_ckpts", type=int, default=10, help="Max number of checkpoints to keep") parser.add_argument( "--eval_train_ratio", type=int, default=1, help="ratio between train batch size and eval batch size") parser.add_argument("--tfrecords", type=bool, default=False, help="Whether to use tfrecords dataset") args = parser.parse_args() config = UserConfig(DEFAULT_YAML, args.config, learning=True) speech_featurizer = TFSpeechFeaturizer(config["speech_config"]) text_featurizer = TextFeaturizer(config["decoder_config"]) tf.random.set_seed(2020) if args.mixed_precision: policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16") tf.keras.mixed_precision.experimental.set_policy(policy) print("Enabled mixed precision training") if args.tfrecords: train_dataset = ASRTFRecordDataset( config["learning_config"]["dataset_config"]["train_paths"], config["learning_config"]["dataset_config"]["tfrecords_dir"], speech_featurizer, text_featurizer, "train", augmentations=config["learning_config"]["augmentations"], shuffle=True, ) eval_dataset = ASRTFRecordDataset( config["learning_config"]["dataset_config"]["eval_paths"], config["learning_config"]["dataset_config"]["tfrecords_dir"], speech_featurizer, text_featurizer, "eval", shuffle=False) else: train_dataset = ASRSliceDataset( stage="train", speech_featurizer=speech_featurizer, text_featurizer=text_featurizer, data_paths=config["learning_config"]["dataset_config"] ["eval_paths"], augmentations=config["learning_config"]["augmentations"], shuffle=True) eval_dataset = ASRSliceDataset(stage="train", speech_featurizer=speech_featurizer, text_featurizer=text_featurizer, data_paths=config["learning_config"] ["dataset_config"]["eval_paths"], shuffle=True) ctc_trainer = CTCTrainer(speech_featurizer, text_featurizer, config["learning_config"]["running_config"], args.mixed_precision) # Build DS2 model f, c = speech_featurizer.compute_feature_dim() with ctc_trainer.strategy.scope(): ds2_model = DeepSpeech2(input_shape=[None, f, c], arch_config=config["model_config"], num_classes=text_featurizer.num_classes, name="deepspeech2") ds2_model._build([1, 50, f, c]) # Compile ctc_trainer.compile(ds2_model, config["learning_config"]["optimizer_config"], max_to_keep=args.max_ckpts) ctc_trainer.fit(train_dataset, eval_dataset, args.eval_train_ratio) if args.export: if args.save_weights: ctc_trainer.model.save_weights(args.export) else: ctc_trainer.model.save(args.export)