import numpy as np
from ctc_segmentation import CtcSegmentationParameters, determine_utterance_segments


def test_determine_utterance_segments():
    """Test the generation of segments from aligned utterances.

    This function is used after a completed CTC segmentation.
    Results are checked and compared with test vectors.
    """
    config = CtcSegmentationParameters()
    config.frame_duration_ms = 1000
    config.score_min_mean_over_L = 2
    utt_begin_indices = [1, 4, 9]
    text = ["catzz#\n", "dogs!!\n"]
    char_probs = np.array([-0.5] * 10)
    timings = np.array(list(range(10))) + 0.5
    segments = determine_utterance_segments(
        config, utt_begin_indices, char_probs, timings, text
    )
    correct_segments = [(2.0, 4.0, -0.5), (5.0, 9.0, -0.5)]
    for i, boundary in enumerate(segments):
        utt_segment = f"{i} {boundary[0]:.2f} {boundary[1]:.2f} {boundary[2]:.2f}"
        print(utt_segment)
    for i in [0, 1]:
        for j in [0, 1, 2]:
            assert segments[i][j] == correct_segments[i][j]
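# A quick way to run the test above on its own, assuming numpy and the
# ctc_segmentation package are installed. The printed boundaries should match
# the test vectors: "0 2.00 4.00 -0.50" and "1 5.00 9.00 -0.50".
if __name__ == "__main__":
    test_determine_utterance_segments()
    print("test_determine_utterance_segments passed")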
def ctc_align(args, device):
    """ESPnet-specific interface for CTC segmentation.

    Parses configuration, infers the CTC posterior probabilities,
    and then aligns start and end of utterances using CTC segmentation.
    Results are written to the output file given in the args.

    :param args: given configuration
    :param device: for inference; one of ['cuda', 'cpu']
    :return: 0 on success
    """
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, ASRInterface)
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=True,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    logging.info(f"Decoding device={device}")
    # Warn for nets with high memory consumption on long audio files
    if hasattr(model, "enc"):
        encoder_module = model.enc.__class__.__module__
    elif hasattr(model, "encoder"):
        encoder_module = model.encoder.__class__.__module__
    else:
        encoder_module = "Unknown"
    logging.info(f"Encoder module: {encoder_module}")
    logging.info(f"CTC module: {model.ctc.__class__.__module__}")
    if "rnn" not in encoder_module:
        logging.warning("No BLSTM model detected; memory consumption may be high.")
    model.to(device=device).eval()
    # read audio and text json data
    with open(args.data_json, "rb") as f:
        js = json.load(f)["utts"]
    with open(args.utt_text, "r", encoding="utf-8") as f:
        lines = f.readlines()
        i = 0
        text = {}
        segment_names = {}
        for name in js.keys():
            text_per_audio = []
            segment_names_per_audio = []
            while i < len(lines) and lines[i].startswith(name):
                text_per_audio.append(lines[i][lines[i].find(" ") + 1 :])
                segment_names_per_audio.append(lines[i][: lines[i].find(" ")])
                i += 1
            text[name] = text_per_audio
            segment_names[name] = segment_names_per_audio
    # apply configuration
    config = CtcSegmentationParameters()
    subsampling_factor = 1
    frame_duration_ms = 10
    if args.subsampling_factor is not None:
        subsampling_factor = args.subsampling_factor
    if args.frame_duration is not None:
        frame_duration_ms = args.frame_duration
    # Backwards compatibility to ctc_segmentation <= 1.5.3
    if hasattr(config, "index_duration"):
        config.index_duration = frame_duration_ms * subsampling_factor / 1000
    else:
        config.subsampling_factor = subsampling_factor
        config.frame_duration_ms = frame_duration_ms
    if args.min_window_size is not None:
        config.min_window_size = args.min_window_size
    if args.max_window_size is not None:
        config.max_window_size = args.max_window_size
    config.char_list = train_args.char_list
    if args.use_dict_blank is not None:
        logging.warning(
            "The option --use-dict-blank is deprecated. If needed,"
            " use --set-blank instead."
        )
    if args.set_blank is not None:
        config.blank = args.set_blank
    if args.replace_spaces_with_blanks is not None:
        if args.replace_spaces_with_blanks:
            config.replace_spaces_with_blanks = True
        else:
            config.replace_spaces_with_blanks = False
    if args.gratis_blank:
        config.blank_transition_cost_zero = True
    if config.blank_transition_cost_zero and args.replace_spaces_with_blanks:
        logging.error(
            "Blanks are inserted between words, and also the transition cost of blank"
            " is zero. This configuration may lead to misalignments!"
        )
    if args.scoring_length is not None:
        config.score_min_mean_over_L = args.scoring_length
    logging.info(f"Frame timings: {frame_duration_ms}ms * {subsampling_factor}")
    # Iterate over audio files to decode and align
    for idx, name in enumerate(js.keys(), 1):
        logging.info("(%d/%d) Aligning " + name, idx, len(js.keys()))
        batch = [(name, js[name])]
        feat, label = load_inputs_and_targets(batch)
        feat = feat[0]
        with torch.no_grad():
            # Encode input frames
            enc_output = model.encode(torch.as_tensor(feat).to(device)).unsqueeze(0)
            # Apply ctc layer to obtain log character probabilities
            lpz = model.ctc.log_softmax(enc_output)[0].cpu().numpy()
        # Prepare the text for aligning
        ground_truth_mat, utt_begin_indices = prepare_text(config, text[name])
        # Align using CTC segmentation
        timings, char_probs, state_list = ctc_segmentation(
            config, lpz, ground_truth_mat
        )
        logging.debug(f"state_list = {state_list}")
        # Obtain list of utterances with time intervals and confidence score
        segments = determine_utterance_segments(
            config, utt_begin_indices, char_probs, timings, text[name]
        )
        # Write to "segments" file
        for i, boundary in enumerate(segments):
            utt_segment = (
                f"{segment_names[name][i]} {name} {boundary[0]:.2f}"
                f" {boundary[1]:.2f} {boundary[2]:.9f}\n"
            )
            args.output.write(utt_segment)
    return 0
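# Hedged usage sketch for ctc_align above. The attribute names mirror the args
# accessed inside the function; all file paths are placeholders, not taken from
# the original. In ESPnet itself these options come from the asr_align argument
# parser rather than a hand-built namespace.
import torch
from types import SimpleNamespace

args = SimpleNamespace(
    model="results/model.acc.best",          # placeholder path to a trained model
    preprocess_conf=None,
    data_json="dump/align/data.json",        # placeholder path to the data json
    utt_text="data/align/utt_text",          # placeholder path to the utterance text
    subsampling_factor=None,
    frame_duration=None,
    min_window_size=None,
    max_window_size=None,
    use_dict_blank=None,
    set_blank=None,
    replace_spaces_with_blanks=None,
    gratis_blank=False,
    scoring_length=None,
    output=open("exp/align/segments", "w"),  # placeholder output file
)
device = "cuda" if torch.cuda.is_available() else "cpu"
ctc_align(args, device)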
# build typical alphabet
alphabet = [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
            "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'", "%"]

# "filename" (path to the audio file) and "transcript" (its ground-truth text)
# are assumed to be defined earlier; predict_labels_greedy is sketched after
# this snippet.
quartznet = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name="QuartzNet15x5Base-En")
logprobs = quartznet.transcribe([filename], logprobs=True)
greedy_transcript = predict_labels_greedy(alphabet, logprobs[0].cpu().numpy())

# adapted example from here:
# https://github.com/lumaku/ctc-segmentation
config = CtcSegmentationParameters()
# frame duration is the window of the predictions (i.e. the logprobs prediction window)
config.frame_duration_ms = 20
# index of the character used as 'blank' - here, the last character in the alphabet
config.blank = len(alphabet) - 1

ground_truth_mat, utt_begin_indices = prepare_text(config, transcript, alphabet)
timings, char_probs, state_list = ctc_segmentation(
    config, logprobs[0].cpu().numpy(), ground_truth_mat
)
# Obtain list of utterances with time intervals and confidence score
segments = determine_utterance_segments(
    config, utt_begin_indices, char_probs, timings, transcript
)
quartznet_transcript = quartznet.transcribe([filename])

print('Ground Truth Transcript:', transcript)
print('Quartznet Transcript:', quartznet_transcript[0])
print('Quartznet Dense Sequence (greedy search):\n', greedy_transcript)
print('CTC Segmentation Dense Sequence:\n', ''.join(state_list))
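# predict_labels_greedy is not defined in the snippet above. Below is a minimal
# sketch of one plausible implementation, assuming it maps every output frame
# to its highest-scoring character so that the printed "dense sequence" can be
# compared frame-by-frame with the CTC segmentation state_list. This is an
# assumption, not the original helper.
import numpy as np


def predict_labels_greedy(alphabet, logprobs):
    """Return a per-frame string of argmax characters (no CTC collapsing)."""
    # logprobs: (time, num_labels) array of log probabilities from the model
    best_per_frame = np.argmax(logprobs, axis=-1)
    return "".join(alphabet[idx] for idx in best_per_frame)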
def validate_asr_with_alignment(asr_model, val_ds, num_to_validate):
    # read the NeMo-style JSON-lines manifest
    val_set = []
    with open(val_ds) as F:
        for line in F:
            val = json.loads(line)
            val_set.append(val)
    val_files = [t["audio_filepath"] for t in val_set[0:num_to_validate]]
    val_text = [t["text"] for t in val_set[0:num_to_validate]]

    test_cfg = asr_model.cfg['validation_ds']
    test_cfg['manifest_filepath'] = val_ds
    asr_model.setup_test_data(test_cfg)  # TODO: what is this doing?
    calc_wer(asr_model)
    asr_model.preprocessor._sample_rate = test_cfg['sample_rate']
    print("batch size: ", test_cfg['batch_size'],
          "preprocessor sample_rate: ", asr_model.preprocessor._sample_rate)
    logprobs_list = asr_model.transcribe(val_files, batch_size=test_cfg['batch_size'],
                                         logprobs=True)
    nlogprobs = len(logprobs_list)

    # convert the model labels to a list and add the blank character
    alphabet = [t for t in asr_model.cfg['labels']] + ['%']

    # adapted example from here:
    # https://github.com/lumaku/ctc-segmentation
    config = CtcSegmentationParameters()
    # frame duration is the window of the predictions (i.e. the logprobs prediction window)
    config.frame_duration_ms = 20
    # index of the character used as 'blank' - here, the last character in the alphabet
    config.blank = len(alphabet) - 1

    for ii in range(nlogprobs):
        transcript = val_text[ii]
        ground_truth_mat, utt_begin_indices = prepare_text(config, transcript, alphabet)
        timings, char_probs, state_list = ctc_segmentation(
            config, logprobs_list[ii].cpu().numpy(), ground_truth_mat
        )
        # Obtain list of utterances with time intervals and confidence score
        segments = determine_utterance_segments(
            config, utt_begin_indices, char_probs, timings, transcript
        )
        quartznet_transcript = asr_model.transcribe([val_files[ii]])

        print('Ground Truth Transcript:', transcript)
        print('Quartznet Transcript:', quartznet_transcript[0])
        print('CTC Segmentation Dense Sequence:\n', ''.join(state_list))

        # save onset and end time per word
        print('Saving timing prediction.')
        fname = open(val_files[ii][:-4] + '_align.csv', 'w')  # jamendolyrics convention
        for i in transcript.split():
            # Locate the word as a whole word in the transcript:
            # char.start() is the index of its first character,
            # char.end() is one past the index of its last character,
            # so -1 is needed to index the last character.
            char = re.search(r'\b({})\b'.format(i), transcript)
            # segments[character index][0] = start time of that character
            onset = segments[char.start()][0]
            # segments[character index][1] = end time of that character
            term = segments[char.end() - 1][1]
            fname.write(str(onset) + ',' + str(term) + '\n')
        fname.close()
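# Hedged usage sketch for validate_asr_with_alignment above. The manifest path
# is a placeholder; the pretrained model name is the same one used in the
# QuartzNet snippet earlier. calc_wer is assumed to be a helper defined
# elsewhere in the same module.
import nemo.collections.asr as nemo_asr

asr_model = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name="QuartzNet15x5Base-En")
validate_asr_with_alignment(asr_model, "val_manifest.json", num_to_validate=5)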
def ctc_align(args, device):
    """ESPnet-specific interface for CTC segmentation.

    Parses configuration, infers the CTC posterior probabilities,
    and then aligns start and end of utterances using CTC segmentation.
    Results are written to the output file given in the args.

    :param args: given configuration
    :param device: for inference; one of ['cuda', 'cpu']
    :return: 0 on success
    """
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, ASRInterface)
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=True,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    logging.info(f"Decoding device={device}")
    model.to(device=device).eval()
    # read audio and text json data
    with open(args.data_json, "rb") as f:
        js = json.load(f)["utts"]
    with open(args.utt_text, "r") as f:
        lines = f.readlines()
        i = 0
        text = {}
        segment_names = {}
        for name in js.keys():
            text_per_audio = []
            segment_names_per_audio = []
            while i < len(lines) and lines[i].startswith(name):
                text_per_audio.append(lines[i][lines[i].find(" ") + 1:])
                segment_names_per_audio.append(lines[i][:lines[i].find(" ")])
                i += 1
            text[name] = text_per_audio
            segment_names[name] = segment_names_per_audio
    # apply configuration
    config = CtcSegmentationParameters()
    if args.subsampling_factor is not None:
        config.subsampling_factor = args.subsampling_factor
    if args.frame_duration is not None:
        config.frame_duration_ms = args.frame_duration
    if args.min_window_size is not None:
        config.min_window_size = args.min_window_size
    if args.max_window_size is not None:
        config.max_window_size = args.max_window_size
    char_list = train_args.char_list
    if args.use_dict_blank:
        config.blank = char_list[0]
    logging.debug(
        f"Frame timings: {config.frame_duration_ms}ms * {config.subsampling_factor}"
    )
    # Iterate over audio files to decode and align
    for idx, name in enumerate(js.keys(), 1):
        logging.info("(%d/%d) Aligning " + name, idx, len(js.keys()))
        batch = [(name, js[name])]
        feat, label = load_inputs_and_targets(batch)
        feat = feat[0]
        with torch.no_grad():
            # Encode input frames
            enc_output = model.encode(torch.as_tensor(feat).to(device)).unsqueeze(0)
            # Apply ctc layer to obtain log character probabilities
            lpz = model.ctc.log_softmax(enc_output)[0].cpu().numpy()
        # Prepare the text for aligning
        ground_truth_mat, utt_begin_indices = prepare_text(config, text[name], char_list)
        # Align using CTC segmentation
        timings, char_probs, state_list = ctc_segmentation(config, lpz, ground_truth_mat)
        # Obtain list of utterances with time intervals and confidence score
        segments = determine_utterance_segments(
            config, utt_begin_indices, char_probs, timings, text[name]
        )
        # Write to "segments" file
        for i, boundary in enumerate(segments):
            utt_segment = (
                f"{segment_names[name][i]} {name} {boundary[0]:.2f}"
                f" {boundary[1]:.2f} {boundary[2]:.9f}\n"
            )
            args.output.write(utt_segment)
    return 0
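# Small hedged sketch (not part of the original code) showing how the
# "segments" file written by ctc_align can be read back. Each line has the
# format "<segment_name> <audio_name> <start> <end> <score>", as produced by
# the f-string above; the path argument is a placeholder.
def read_segments(path):
    segments = {}
    with open(path) as f:
        for line in f:
            seg_name, audio_name, start, end, score = line.split()
            segments[seg_name] = (audio_name, float(start), float(end), float(score))
    return segments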