def test_audio_preprocessors(self):
    batch_size = 2
    dl = nemo_asr.AudioToSpeechLabelDataLayer(
        # featurizer_config=self.featurizer_config,
        manifest_filepath=self.manifest_filepath,
        labels=self.labels,
        batch_size=batch_size,
        # placement=DeviceType.GPU,
        drop_last=False,
        shuffle=False,
    )

    installed_torchaudio = True
    try:
        import torchaudio
    except ModuleNotFoundError:
        installed_torchaudio = False
        with self.assertRaises(ModuleNotFoundError):
            to_spectrogram = nemo_asr.AudioToSpectrogramPreprocessor(n_fft=400, window=None)
        with self.assertRaises(ModuleNotFoundError):
            to_mfcc = nemo_asr.AudioToMFCCPreprocessor(n_mfcc=15)

    if installed_torchaudio:
        to_spectrogram = nemo_asr.AudioToSpectrogramPreprocessor(n_fft=400, window=None)
        to_mfcc = nemo_asr.AudioToMFCCPreprocessor(n_mfcc=15)

    time_stretch_augment = nemo_asr.TimeStretchAugmentation(
        self.featurizer_config['sample_rate'], probability=1.0, min_speed_rate=0.9, max_speed_rate=1.1
    )
    to_melspec = nemo_asr.AudioToMelSpectrogramPreprocessor(features=50)

    for batch in dl.data_iterator:
        input_signals, seq_lengths, _, _ = batch
        input_signals = input_signals.to(to_melspec._device)
        seq_lengths = seq_lengths.to(to_melspec._device)

        melspec = to_melspec.forward(input_signals, seq_lengths)

        if installed_torchaudio:
            spec = to_spectrogram.forward(input_signals, seq_lengths)
            mfcc = to_mfcc.forward(input_signals, seq_lengths)

        ts_input_signals = time_stretch_augment.forward(input_signals, seq_lengths)

        # Check that the number of features is what we expect
        self.assertTrue(melspec[0].shape[1] == 50)

        if installed_torchaudio:
            self.assertTrue(spec[0].shape[1] == 201)  # n_fft // 2 + 1 bins
            self.assertTrue(mfcc[0].shape[1] == 15)

        # Speed rates in [0.9, 1.1] stretch a 1 s clip to roughly
        # [sample_rate / 1.1, sample_rate / 0.9] samples, so the
        # 0.85/1.15 bounds below leave a small tolerance around that range.
        timesteps = ts_input_signals[0].shape[1]
        self.assertTrue(timesteps <= int(1.15 * self.featurizer_config['sample_rate']))
        self.assertTrue(timesteps >= int(0.85 * self.featurizer_config['sample_rate']))
def test_audio_preprocessors(self):
    batch_size = 5
    dl = nemo_asr.AudioToTextDataLayer(
        # featurizer_config=self.featurizer_config,
        manifest_filepath=self.manifest_filepath,
        labels=self.labels,
        batch_size=batch_size,
        # placement=DeviceType.GPU,
        drop_last=True,
        shuffle=False,
    )

    installed_torchaudio = True
    try:
        import torchaudio
    except ModuleNotFoundError:
        installed_torchaudio = False
        with self.assertRaises(ModuleNotFoundError):
            to_spectrogram = nemo_asr.AudioToSpectrogramPreprocessor(n_fft=400, window=None)
        with self.assertRaises(ModuleNotFoundError):
            to_mfcc = nemo_asr.AudioToMFCCPreprocessor(n_mfcc=15)

    if installed_torchaudio:
        to_spectrogram = nemo_asr.AudioToSpectrogramPreprocessor(n_fft=400, window=None)
        to_mfcc = nemo_asr.AudioToMFCCPreprocessor(n_mfcc=15)

    to_melspec = nemo_asr.AudioToMelSpectrogramPreprocessor(features=50)

    for batch in dl.data_iterator:
        input_signals, seq_lengths, _, _ = batch
        input_signals = input_signals.to(to_melspec._device)
        seq_lengths = seq_lengths.to(to_melspec._device)

        melspec = to_melspec.forward(input_signals, seq_lengths)

        if installed_torchaudio:
            spec = to_spectrogram.forward(input_signals, seq_lengths)
            mfcc = to_mfcc.forward(input_signals, seq_lengths)

        # Check that number of features is what we expect
        self.assertTrue(melspec[0].shape[1] == 50)

        if installed_torchaudio:
            self.assertTrue(spec[0].shape[1] == 201)  # n_fft // 2 + 1 bins
            self.assertTrue(mfcc[0].shape[1] == 15)
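# The sketch below shows the preprocessor API the two tests above exercise, outside
# the unittest harness. It is a minimal example, assuming `nemo.collections.asr` is
# importable as `nemo_asr`, that a NeuralModuleFactory has been created (it determines
# `to_melspec._device`), and 16 kHz audio; the shape assertion mirrors the tests.
def demo_mel_preprocessor():
    import torch
    import nemo
    import nemo.collections.asr as nemo_asr

    nemo.core.NeuralModuleFactory()  # sets the default device for neural modules
    to_melspec = nemo_asr.AudioToMelSpectrogramPreprocessor(features=50)

    # A batch of two one-second random "waveforms" at 16 kHz.
    input_signals = torch.randn(2, 16000).to(to_melspec._device)
    seq_lengths = torch.tensor([16000, 16000]).to(to_melspec._device)

    melspec = to_melspec.forward(input_signals, seq_lengths)
    assert melspec[0].shape[1] == 50  # (batch, features, time) with features=50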
def create_all_dags(args, neural_factory):
    yaml = YAML(typ="safe")
    with open(args.model_config) as f:
        jasper_params = yaml.load(f)

    labels = jasper_params['labels']  # Vocab of tokens
    sample_rate = jasper_params['sample_rate']

    # Calculate num_workers for dataloader
    total_cpus = os.cpu_count()
    cpu_per_traindl = max(int(total_cpus / neural_factory.world_size), 1)

    # perturb_config = jasper_params.get('perturb', None)
    train_dl_params = copy.deepcopy(jasper_params["AudioToSpeechLabelDataLayer"])
    train_dl_params.update(jasper_params["AudioToSpeechLabelDataLayer"]["train"])
    del train_dl_params["train"]
    del train_dl_params["eval"]
    # del train_dl_params["normalize_transcripts"]

    # Look for augmentations
    audio_augmentor = jasper_params.get('AudioAugmentor', None)

    data_layer = nemo_asr.AudioToSpeechLabelDataLayer(
        manifest_filepath=args.train_dataset,
        labels=labels,
        sample_rate=sample_rate,
        batch_size=args.batch_size,
        num_workers=cpu_per_traindl,
        augmentor=audio_augmentor,
        **train_dl_params,
    )

    crop_pad_augmentation = nemo_asr.CropOrPadSpectrogramAugmentation(audio_length=128)

    N = len(data_layer)
    steps_per_epoch = math.ceil(N / (args.batch_size * args.iter_per_step * args.num_gpus))
    logging.info('Steps per epoch : {0}'.format(steps_per_epoch))
    logging.info('Have {0} examples to train on.'.format(N))

    data_preprocessor = nemo_asr.AudioToMFCCPreprocessor(
        sample_rate=sample_rate, **jasper_params["AudioToMFCCPreprocessor"],
    )

    spectr_augment_config = jasper_params.get('SpectrogramAugmentation', None)
    if spectr_augment_config:
        data_spectr_augmentation = nemo_asr.SpectrogramAugmentation(**spectr_augment_config)

    eval_dl_params = copy.deepcopy(jasper_params["AudioToSpeechLabelDataLayer"])
    eval_dl_params.update(jasper_params["AudioToSpeechLabelDataLayer"]["eval"])
    del eval_dl_params["train"]
    del eval_dl_params["eval"]

    data_layers_eval = []
    if args.eval_datasets:
        for eval_dataset in args.eval_datasets:
            data_layer_eval = nemo_asr.AudioToSpeechLabelDataLayer(
                manifest_filepath=eval_dataset,
                sample_rate=sample_rate,
                labels=labels,
                batch_size=args.eval_batch_size,
                num_workers=cpu_per_traindl,
                **eval_dl_params,
            )
            data_layers_eval.append(data_layer_eval)
    else:
        logging.warning("There were no val datasets passed")

    jasper_encoder = nemo_asr.JasperEncoder(**jasper_params["JasperEncoder"])

    jasper_decoder = nemo_asr.JasperDecoderForClassification(
        feat_in=jasper_params["JasperEncoder"]["jasper"][-1]["filters"],
        num_classes=len(labels),
        **jasper_params['JasperDecoderForClassification'],
    )

    ce_loss = nemo_asr.CrossEntropyLossNM()

    logging.info('================================')
    logging.info(f"Number of parameters in encoder: {jasper_encoder.num_weights}")
    logging.info(f"Number of parameters in decoder: {jasper_decoder.num_weights}")
    logging.info(
        f"Total number of parameters in model: "
        f"{jasper_decoder.num_weights + jasper_encoder.num_weights}"
    )
    logging.info('================================')

    # --- Assemble Training DAG --- #
    audio_signal, audio_signal_len, commands, command_len = data_layer()

    processed_signal, processed_signal_len = data_preprocessor(
        input_signal=audio_signal, length=audio_signal_len)

    # Track the lengths of the processed signal, matching the eval DAG below.
    processed_signal, processed_signal_len = crop_pad_augmentation(
        input_signal=processed_signal, length=processed_signal_len)

    if spectr_augment_config:
        processed_signal = data_spectr_augmentation(input_spec=processed_signal)

    encoded, encoded_len = jasper_encoder(
        audio_signal=processed_signal, length=processed_signal_len)

    decoded = jasper_decoder(encoder_output=encoded)

    loss = ce_loss(logits=decoded, labels=commands)

    # Callbacks needed to print info to console and Tensorboard
    train_callback = nemo.core.SimpleLossLoggerCallback(
        # Notice that we pass in loss, predictions, and the labels (commands).
        # Of course we would like to see our training loss, but we need the
        # other arguments to calculate the accuracy.
        tensors=[loss, decoded, commands],
        # The print_func defines what gets printed.
        print_func=partial(monitor_classification_training_progress, eval_metric=None),
        get_tb_values=lambda x: [("loss", x[0])],
        tb_writer=neural_factory.tb_writer,
    )

    chpt_callback = nemo.core.CheckpointCallback(
        folder=neural_factory.checkpoint_dir,
        load_from_folder=args.load_dir,
        step_freq=args.checkpoint_save_freq,
    )

    callbacks = [train_callback, chpt_callback]

    # --- Assemble Evaluation DAGs --- #
    for i, eval_dl in enumerate(data_layers_eval):
        test_audio_signal, test_audio_signal_len, test_commands, test_command_len = eval_dl()

        test_processed_signal, test_processed_signal_len = data_preprocessor(
            input_signal=test_audio_signal, length=test_audio_signal_len)

        test_processed_signal, test_processed_signal_len = crop_pad_augmentation(
            input_signal=test_processed_signal, length=test_processed_signal_len)

        test_encoded, test_encoded_len = jasper_encoder(
            audio_signal=test_processed_signal, length=test_processed_signal_len)

        test_decoded = jasper_decoder(encoder_output=test_encoded)

        test_loss = ce_loss(logits=test_decoded, labels=test_commands)

        # Create the corresponding eval callback
        tagname = os.path.basename(args.eval_datasets[i]).split(".")[0]
        eval_callback = nemo.core.EvaluatorCallback(
            eval_tensors=[test_loss, test_decoded, test_commands],
            user_iter_callback=partial(process_classification_evaluation_batch, top_k=1),
            user_epochs_done_callback=partial(
                process_classification_evaluation_epoch, eval_metric=1, tag=tagname),
            # How often we evaluate the model on the test set
            eval_step=args.eval_freq,
            tb_writer=neural_factory.tb_writer,
        )
        callbacks.append(eval_callback)

    return loss, callbacks, steps_per_epoch
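# A minimal sketch of how create_all_dags is typically driven, assuming NeMo 0.x's
# NeuralModuleFactory/train API and an argparse namespace carrying the fields used
# above (model_config, train_dataset, batch_size, ...) plus hypothetical num_epochs,
# lr, and weight_decay entries; parse_args is a placeholder, not defined above.
def main():
    args = parse_args()  # hypothetical argparse helper
    neural_factory = nemo.core.NeuralModuleFactory(
        local_rank=args.local_rank,
        log_dir=args.work_dir,
        create_tb_writer=True,
    )
    loss, callbacks, steps_per_epoch = create_all_dags(args, neural_factory)

    # Optimize the training loss; the callbacks handle logging, checkpoints, and eval.
    neural_factory.train(
        tensors_to_optimize=[loss],
        callbacks=callbacks,
        optimizer="novograd",
        optimization_params={
            "num_epochs": args.num_epochs,
            "lr": args.lr,
            "weight_decay": args.weight_decay,
        },
    )


if __name__ == '__main__':
    main()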