Example No. 1
def test_prepare_text():
    """Test the prepare_text function for CTC segmentation.

    Results are checked and compared with test vectors.
    """
    config = CtcSegmentationParameters()
    text = ["catzz#\n", "dogs!!\n"]
    char_list = ["•", "a", "c", "d", "g", "o", "s", "t"]
    ground_truth_mat, utt_begin_indices = prepare_text(config, text, char_list)
    correct_begin_indices = np.array([1, 5, 10])
    assert (utt_begin_indices == correct_begin_indices).all()
    gtm = list(ground_truth_mat.shape)
    assert gtm[0] == 11
    assert gtm[1] == 1
    # test scan for longer tokens:
    text = ["cat"]
    char_list = ["•", "UNK", "a", "c", "t", "cat"]
    ground_truth_mat, utt_begin_indices = prepare_text(config, text, char_list)
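    # the char list contains the three-character token "cat", so each row of
    # the ground truth matrix can hold up to three token candidates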
    gtm = list(ground_truth_mat.shape)
    assert gtm[0] == 6
    assert gtm[1] == 3
Example No. 2
def test_ctc_segmentation():
    """Test CTC segmentation.

    This is a minimal example for the function.
    Only executes CTC segmentation, does not check its result.
    """
    config = CtcSegmentationParameters()
    config.min_window_size = 20
    config.max_window_size = 50
    char_list = [config.blank, "a", "c", "d", "g", "o", "s", "t"]
    text = ["catzz#\n", "dogs!!\n"]
    # dummy CTC log posterior probabilities (30 time frames, 8 classes)
    lpz = torch.nn.functional.log_softmax(torch.rand(30, 8) * 10, dim=0).numpy()
    ground_truth_mat, utt_begin_indices = prepare_text(config, text, char_list)
    timings, char_probs, state_list = ctc_segmentation(config, lpz, ground_truth_mat)
Example No. 3
def test_ctc_segmentation():
    """Test CTC segmentation.

    This is a minimal example for the function.
    Only executes CTC segmentation, does not check its result.
    """
    config = CtcSegmentationParameters()
    config.min_window_size = 20
    config.max_window_size = 50
    char_list = ["•", "a", "c", "d", "g", "o", "s", "t"]
    text = ["catzz#\n", "dogs!!\n"]
    ground_truth_mat, utt_begin_indices = prepare_text(config, text, char_list)
    # dummy CTC log posterior probabilities (30 time frames, 8 classes)
    lpz = torch.nn.functional.log_softmax(torch.rand(30, 8) * 10, dim=0).numpy()
    timings, char_probs, state_list = ctc_segmentation(config, lpz,
                                                       ground_truth_mat)
Example No. 4
def ctc_align(args, device):
    """ESPnet-specific interface for CTC segmentation.

    Parses configuration, infers the CTC posterior probabilities,
    and then aligns start and end of utterances using CTC segmentation.
    Results are written to the output file given in the args.

    :param args: given configuration
    :param device: for inference; one of ['cuda', 'cpu']
    :return:  0 on success
    """
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, ASRInterface)
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=True,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    logging.info(f"Decoding device={device}")
    # Warn for nets with high memory consumption on long audio files
    if hasattr(model, "enc"):
        encoder_module = model.enc.__class__.__module__
    elif hasattr(model, "encoder"):
        encoder_module = model.encoder.__class__.__module__
    else:
        encoder_module = "Unknown"
    logging.info(f"Encoder module: {encoder_module}")
    logging.info(f"CTC module:     {model.ctc.__class__.__module__}")
    if "rnn" not in encoder_module:
        logging.warning("No BLSTM model detected; memory consumption may be high.")
    model.to(device=device).eval()
    # read audio and text json data
    with open(args.data_json, "rb") as f:
        js = json.load(f)["utts"]
    with open(args.utt_text, "r", encoding="utf-8") as f:
        lines = f.readlines()
        i = 0
        text = {}
        segment_names = {}
        for name in js.keys():
            text_per_audio = []
            segment_names_per_audio = []
            while i < len(lines) and lines[i].startswith(name):
                text_per_audio.append(lines[i][lines[i].find(" ") + 1 :])
                segment_names_per_audio.append(lines[i][: lines[i].find(" ")])
                i += 1
            text[name] = text_per_audio
            segment_names[name] = segment_names_per_audio
    # apply configuration
    config = CtcSegmentationParameters()
    subsampling_factor = 1
    frame_duration_ms = 10
    if args.subsampling_factor is not None:
        subsampling_factor = args.subsampling_factor
    if args.frame_duration is not None:
        frame_duration_ms = args.frame_duration
    # Backwards compatibility to ctc_segmentation <= 1.5.3
    if hasattr(config, "index_duration"):
        config.index_duration = frame_duration_ms * subsampling_factor / 1000
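        # e.g., with the defaults above (10 ms frames, subsampling factor 1),
        # each CTC output index corresponds to 0.01 s of audio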
    else:
        config.subsampling_factor = subsampling_factor
        config.frame_duration_ms = frame_duration_ms
    if args.min_window_size is not None:
        config.min_window_size = args.min_window_size
    if args.max_window_size is not None:
        config.max_window_size = args.max_window_size
    config.char_list = train_args.char_list
    if args.use_dict_blank is not None:
        logging.warning(
            "The option --use-dict-blank is deprecated. If needed,"
            " use --set-blank instead."
        )
    if args.set_blank is not None:
        config.blank = args.set_blank
    if args.replace_spaces_with_blanks is not None:
        if args.replace_spaces_with_blanks:
            config.replace_spaces_with_blanks = True
        else:
            config.replace_spaces_with_blanks = False
    if args.gratis_blank:
        config.blank_transition_cost_zero = True
    if config.blank_transition_cost_zero and args.replace_spaces_with_blanks:
        logging.error(
            "Blanks are inserted between words, and also the transition cost of blank"
            " is zero. This configuration may lead to misalignments!"
        )
    if args.scoring_length is not None:
        config.score_min_mean_over_L = args.scoring_length
    logging.info(f"Frame timings: {frame_duration_ms}ms * {subsampling_factor}")
    # Iterate over audio files to decode and align
    for idx, name in enumerate(js.keys(), 1):
        logging.info("(%d/%d) Aligning " + name, idx, len(js.keys()))
        batch = [(name, js[name])]
        feat, label = load_inputs_and_targets(batch)
        feat = feat[0]
        with torch.no_grad():
            # Encode input frames
            enc_output = model.encode(torch.as_tensor(feat).to(device)).unsqueeze(0)
            # Apply ctc layer to obtain log character probabilities
            lpz = model.ctc.log_softmax(enc_output)[0].cpu().numpy()
        # Prepare the text for aligning
        ground_truth_mat, utt_begin_indices = prepare_text(config, text[name])
        # Align using CTC segmentation
        timings, char_probs, state_list = ctc_segmentation(
            config, lpz, ground_truth_mat
        )
        logging.debug(f"state_list = {state_list}")
        # Obtain list of utterances with time intervals and confidence score
        segments = determine_utterance_segments(
            config, utt_begin_indices, char_probs, timings, text[name]
        )
        # Write to "segments" file
        for i, boundary in enumerate(segments):
            utt_segment = (
                f"{segment_names[name][i]} {name} {boundary[0]:.2f}"
                f" {boundary[1]:.2f} {boundary[2]:.9f}\n"
            )
            args.output.write(utt_segment)
    return 0
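Each line written to the output is a "segments" entry: segment name, audio name, start time, end time (in seconds), and a confidence score from CTC segmentation. A hypothetical output line (names and numbers are illustrative) could look like:

    sample01_0001 sample01 0.28 2.94 -0.061310797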
Example No. 5
    def prepare_segmentation_task(self, text, lpz, name=None, speech_len=None):
        """Preprocess text, and gather text and lpz into a task object.

        Text is pre-processed and tokenized depending on configuration.
        If ``speech_len`` is given, the timing configuration is updated.
        Text, lpz, and configuration are collected in a CTCSegmentationTask
        object. The resulting object can be serialized and passed to a
        multiprocessing computation.

        Only a minimal amount of text processing is done here, i.e., splitting
        the utterances in ``text`` into a list and applying ``text_cleaner``.
        It is recommended to normalize the text beforehand, e.g., convert
        numbers into their spoken equivalents, remove special characters, and
        map UTF-8 characters to those in your ASR model dictionary.

        The text is tokenized based on the ``text_converter`` setting:

        The "tokenize" method is more efficient and the easiest for models
        based on latin or cyrillic script that only contain the main chars,
        ["a", "b", ...] or for Japanese or Chinese ASR models with ~3000
        short Kanji / Hanzi tokens.

        The "classic" method improves the the accuracy of the alignments
        for models that contain longer tokens, but with a greater complexity
        for computation. The function scans for partial tokens which may
        improve time resolution.
        For example, the word "▁really" will be broken down into
        ``['▁', '▁r', '▁re', '▁real', '▁really']``. The alignment will be
        based on the most probable activation sequence given by the network.

        Args:
            text: List or multiline-string with utterance ground truths.
            lpz: Log CTC posterior probabilities obtained from the CTC-network;
                numpy array shaped as ( <time steps>, <classes> ).
            name: Audio file name. Choose a unique name, or the original audio
                file name, to distinguish multiple audio files. Default: None.
            speech_len: Number of sample points. If given, the timing
                configuration is automatically derived from the sample rate
                fs, the length of the speech, and the length of lpz. If None,
                make sure the timing parameters are correct; see time_stamps
                for reference. Default: None.

        Returns:
            task: CTCSegmentationTask object that can be passed to
                ``get_segments()`` in order to obtain alignments.
        """
        config = self.config
        # Update timing parameters, if needed
        if speech_len is not None:
            lpz_len = lpz.shape[0]
            timing_cfg = self.get_timing_config(speech_len, lpz_len)
            config.set(**timing_cfg)
        # `text` is needed in the form of a list.
        utt_ids, text = self._split_text(text)
        # Obtain utterance & label sequence from text
        if self.text_converter == "tokenize":
            # list of str --tokenize--> list of np.array
            token_list = [
                self.preprocess_fn("<dummy>", {"text": utt})["text"]
                for utt in text
            ]
            # filter out any instances of the <unk> token
            unk = config.char_list.index("<unk>")
            token_list = [utt[utt != unk] for utt in token_list]
            ground_truth_mat, utt_begin_indices = prepare_token_list(
                config, token_list)
        else:
            assert self.text_converter == "classic"
            text = [self.preprocess_fn.text_cleaner(utt) for utt in text]
            token_list = [
                "".join(self.preprocess_fn.tokenizer.text2tokens(utt))
                for utt in text
            ]
            token_list = [utt.replace("<unk>", "") for utt in token_list]
            ground_truth_mat, utt_begin_indices = prepare_text(
                config, token_list)
        task = CTCSegmentationTask(
            config=config,
            name=name,
            text=text,
            ground_truth_mat=ground_truth_mat,
            utt_begin_indices=utt_begin_indices,
            utt_ids=utt_ids,
            lpz=lpz,
        )
        return task
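A minimal usage sketch, assuming ``aligner`` is an instance of the surrounding class, ``lpz`` holds the CTC log posteriors, and ``speech`` is the raw waveform (all names here are illustrative):

    # hypothetical call sequence based on the docstring above
    text = ["utt1 THE SALE OF THE HOTELS", "utt2 IS PART OF HOLIDAY'S STRATEGY"]
    task = aligner.prepare_segmentation_task(text, lpz, name="audio_01", speech_len=len(speech))
    segments = aligner.get_segments(task)  # alignments for the utterances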
Example No. 6
File: utils.py  Project: zt706/NeMo
def get_segments(
    log_probs: np.ndarray,
    path_wav: Union[PosixPath, str],
    transcript_file: Union[PosixPath, str],
    output_file: str,
    vocabulary: List[str],
    window_size: int = 8000,
    frame_duration_ms: int = 20,
) -> None:
    """
    Segments the audio and saves the segment timings to a file

    Args:
        log_probs: Log probabilities for the original audio from an ASR model, shape T * |vocabulary|.
                   Values for blank should be at position 0
        path_wav: path to the audio .wav file
        transcript_file: path to the transcript text file
        output_file: path to the file to save timings for segments
        vocabulary: vocabulary used to train the ASR model, note blank is at position 0
        window_size: minimum alignment window size (in CTC output frames); each utterance should fit into this window
        frame_duration_ms: frame duration in ms
    """
    config = cs.CtcSegmentationParameters()
    config.char_list = vocabulary
    config.min_window_size = window_size
    config.frame_duration_ms = frame_duration_ms
    config.blank = config.space
    config.subsampling_factor = 2
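    # with the default 20 ms frames and a subsampling factor of 2,
    # one CTC output index corresponds to 40 ms of audio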

    with open(transcript_file, "r") as f:
        text = f.readlines()
        text = [t.strip() for t in text if t.strip()]

    # add corresponding original text without pre-processing
    transcript_file_no_preprocessing = transcript_file.replace(
        '.txt', '_with_punct.txt')
    if not os.path.exists(transcript_file_no_preprocessing):
        raise ValueError(f'{transcript_file_no_preprocessing} not found.')

    with open(transcript_file_no_preprocessing, "r") as f:
        text_no_preprocessing = f.readlines()
        text_no_preprocessing = [
            t.strip() for t in text_no_preprocessing if t.strip()
        ]

    if len(text_no_preprocessing) != len(text):
        raise ValueError(
            f'{transcript_file} and {transcript_file_no_preprocessing} do not match'
        )

    ground_truth_mat, utt_begin_indices = cs.prepare_text(config, text)
    logging.debug(f"Syncing {transcript_file}")
    logging.debug(
        f"Audio length {os.path.basename(path_wav)}: {log_probs.shape[0]}. "
        f"Text length {os.path.basename(transcript_file)}: {len(ground_truth_mat)}"
    )

    timings, char_probs, char_list = cs.ctc_segmentation(
        config, log_probs, ground_truth_mat)
    segments = cs.determine_utterance_segments(config, utt_begin_indices,
                                               char_probs, timings, text)
    write_output(output_file, path_wav, segments, text, text_no_preprocessing)
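A hypothetical call, assuming ``log_probs`` was produced by a CTC ASR model for ``sample.wav`` and the matching transcript files exist (paths are illustrative):

    get_segments(
        log_probs,
        "audio/sample.wav",
        "transcripts/sample.txt",
        "output/sample_segments.txt",
        vocabulary,
        window_size=8000,
        frame_duration_ms=20,
    )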
Example No. 7
def get_segments(
    log_probs: np.ndarray,
    path_wav: Union[PosixPath, str],
    transcript_file: Union[PosixPath, str],
    output_file: str,
    vocabulary: List[str],
    tokenizer: SentencePieceTokenizer,
    bpe_model: bool,
    index_duration: float,
    window_size: int = 8000,
    log_file: str = "log.log",
    debug: bool = False,
) -> None:
    """
    Segments the audio and saves the segment timings to a file

    Args:
        log_probs: Log probabilities for the original audio from an ASR model, shape T * |vocabulary|.
                   Values for blank should be at position 0
        path_wav: path to the audio .wav file
        transcript_file: path to the transcript text file
        output_file: path to the file to save timings for segments
        vocabulary: vocabulary used to train the ASR model, note blank is at position len(vocabulary) - 1
        tokenizer: ASR model tokenizer (for BPE models, None for char-based models)
        bpe_model: Indicates whether the model uses BPE
        window_size: minimum alignment window size (in CTC output frames); each utterance should fit into this window
        index_duration: corresponding time duration of one CTC output index (in seconds)
    """
    level = "DEBUG" if debug else "INFO"
    file_handler = logging.FileHandler(filename=log_file)
    stdout_handler = logging.StreamHandler(sys.stdout)
    handlers = [file_handler, stdout_handler]
    logging.basicConfig(handlers=handlers, level=level)

    try:
        with open(transcript_file, "r") as f:
            text = f.readlines()
            text = [t.strip() for t in text if t.strip()]

        # add corresponding original text without pre-processing
        transcript_file_no_preprocessing = transcript_file.replace(
            ".txt", "_with_punct.txt")
        if not os.path.exists(transcript_file_no_preprocessing):
            raise ValueError(f"{transcript_file_no_preprocessing} not found.")

        with open(transcript_file_no_preprocessing, "r") as f:
            text_no_preprocessing = f.readlines()
            text_no_preprocessing = [
                t.strip() for t in text_no_preprocessing if t.strip()
            ]

        # add corresponding normalized original text
        transcript_file_normalized = transcript_file.replace(
            ".txt", "_with_punct_normalized.txt")
        if not os.path.exists(transcript_file_normalized):
            raise ValueError(f"{transcript_file_normalized} not found.")

        with open(transcript_file_normalized, "r") as f:
            text_normalized = f.readlines()
            text_normalized = [t.strip() for t in text_normalized if t.strip()]

        if len(text_no_preprocessing) != len(text):
            raise ValueError(
                f"{transcript_file} and {transcript_file_no_preprocessing} do not match"
            )

        if len(text_normalized) != len(text):
            raise ValueError(
                f"{transcript_file} and {transcript_file_normalized} do not match"
            )

        config = cs.CtcSegmentationParameters()
        config.char_list = vocabulary
        config.min_window_size = window_size
        config.index_duration = index_duration
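        # index_duration is the time span of one CTC output index,
        # e.g. 0.02 s for a model that emits one output every 20 ms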

        if bpe_model:
            ground_truth_mat, utt_begin_indices = _prepare_tokenized_text_for_bpe_model(
                text, tokenizer, vocabulary, 0)
        else:
            config.excluded_characters = ".,-?!:»«;'›‹()"
            config.blank = vocabulary.index(" ")
            ground_truth_mat, utt_begin_indices = cs.prepare_text(config, text)

        _print(ground_truth_mat, config.char_list)

        # set this only after prepare_text()
        config.blank = 0
        logging.debug(f"Syncing {transcript_file}")
        logging.debug(
            f"Audio length {os.path.basename(path_wav)}: {log_probs.shape[0]}. "
            f"Text length {os.path.basename(transcript_file)}: {len(ground_truth_mat)}"
        )

        timings, char_probs, char_list = cs.ctc_segmentation(
            config, log_probs, ground_truth_mat)
        _print(ground_truth_mat, vocabulary)
        segments = determine_utterance_segments(config, utt_begin_indices,
                                                char_probs, timings, text,
                                                char_list)

        write_output(output_file, path_wav, segments, text,
                     text_no_preprocessing, text_normalized)
        for i, (word, segment) in enumerate(zip(text, segments)):
            if i < 5:
                logging.debug(
                    f"{segment[0]:.2f} {segment[1]:.2f} {segment[2]:3.4f} {word}"
                )
        logging.info(f"segmentation of {transcript_file} complete.")

    except Exception as e:
        logging.info(f"{e} -- segmentation of {transcript_file} failed")
Example No. 8
def prediction_one_song(model,
                        audio_filename,
                        transcript,
                        lp_dir='tmp',
                        lp_ext='_logprobs.py',
                        word_dir='../lyrics',
                        word_ext='.words.txt',
                        prediction_dir='metadata',
                        prediction_ext='_align.csv'):
    '''
    model  - NeMo model object
    lp_dir - path with log probabilities
    audio_filename - file name of the audio song being processed

    '''
    basename = audio_filename[:-4]  # crop extension (mp3 or wav)
    alphabet = [t for t in model.cfg['labels']] + ['%']  # convert to a list and add the blank character

    # adapted example from here:
    # https://github.com/lumaku/ctc-segmentation
    config = ctc.CtcSegmentationParameters()
    config.frame_duration_ms = 20  #frame duration is the window of the predictions (i.e. logprobs prediction window)
    # index of the character intended as 'blank' - here, the last character in the alphabet
    config.blank = len(alphabet) - 1
    logprobs_filenames = glob.glob(
        os.path.join(lp_dir, basename + '*' + lp_ext))
    logprobs_filenames.sort()

    logprobs_list = []
    for f in logprobs_filenames:
        logprobs_list.append(np.load(f))

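    # stitch the per-chunk log-probability matrices back together along the time axis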
    logprobs = logprobs_list[0]
    for i in range(1, len(logprobs_list)):
        logprobs = np.concatenate((logprobs, logprobs_list[i]))

    print('Prepare Text.', flush=True)
    ground_truth_mat, utt_begin_indices = ctc.prepare_text(
        config, transcript, alphabet)

    print('Segmentation.', flush=True)
    timings, char_probs, state_list = ctc.ctc_segmentation(
        config, logprobs, ground_truth_mat)

    print('Get time intervals.', flush=True)
    # Obtain list of utterances with time intervals and confidence score
    segments = ctc.determine_utterance_segments(config, utt_begin_indices,
                                                char_probs, timings,
                                                transcript)
    tend = time.time()
    pred_fname = prediction_dir + '/' + basename + '_align.csv'  # jamendolyrics convention
    fname = open(pred_fname, 'w')
    # offset compensates for re.search, which only finds the first match in the
    # string; the transcript is iteratively cropped so that previous words in
    # the transcript are not found again.
    offset = 0
    for i in transcript.split():
        #
        # taking each word, and writing out the word timings from segments variable
        #
        # re.search performs regular expression operations.
        # .format inserts characters into {}.
        # r'<string>' is considered a raw string.
        # char.start() gives you the start index of the starting character of the word (i) in transcript string
        # char.end() gives you the last index of the ending character** of the word (i) in transcript string
        # **the ending character is offset by one for the regex command, so a -1 is required to get the right
        # index
        char = re.search(r'{}'.format(i), transcript[offset:])
        #       segments[index of character][start time of char=0]
        onset = segments[char.start() + offset][0]
        #       segments[index of character][end time of char=1]
        term = segments[char.end() - 1 + offset][1]
        offset += char.end()
        fname.write(str(onset) + ',' + str(term) + '\n')
    fname.close()
Example No. 9
def validate_asr_with_alignment(asr_model,val_ds,num_to_validate):
    
    val_set = []
    with open(val_ds) as F:
        for line in F:
            val = json.loads(line)
            val_set.append(val)
    val_files = [t["audio_filepath"] for t in val_set[0:num_to_validate]]
    val_text = [t["text"] for t in val_set[0:num_to_validate]]
    test_cfg = asr_model.cfg['validation_ds']
    test_cfg['manifest_filepath'] = val_ds
    asr_model.setup_test_data(test_cfg)  #TODO: what is this doing?
    calc_wer(asr_model)
    asr_model.preprocessor._sample_rate = test_cfg['sample_rate']
    print("batch size: ", test_cfg['batch_size'],
          "preprocessor sample_rate: ", asr_model.preprocessor._sample_rate)
    
    logprobs_list = asr_model.transcribe(val_files, batch_size=test_cfg['batch_size'], logprobs=True)
    nlogprobs = len(logprobs_list)
    alphabet  = [t for t in asr_model.cfg['labels']] + ['%'] # converting to list and adding blank character.

    # adapted example from here:
    # https://github.com/lumaku/ctc-segmentation
    config = CtcSegmentationParameters()
    config.frame_duration_ms = 20  #frame duration is the window of the predictions (i.e. logprobs prediction window) 
    config.blank = len(alphabet)-1 #index for character that is intended for 'blank' - in our case, we specify the last character in alphabet.

    for ii in range(nlogprobs):
        transcript = val_text[ii]

        ground_truth_mat, utt_begin_indices = prepare_text(config,transcript,alphabet)

        timings, char_probs, state_list     = ctc_segmentation(config,logprobs_list[ii].cpu().numpy(),ground_truth_mat)
        
        # Obtain list of utterances with time intervals and confidence score
        segments                            = determine_utterance_segments(config, utt_begin_indices, char_probs, timings, transcript)
        
        quartznet_transcript = asr_model.transcribe([val_files[ii]])

        print('Ground Truth Transcript:',transcript)
        print('Quartznet Transcript:',quartznet_transcript[0])
        print('CTC Segmentation Dense Sequence:\n', ''.join(state_list))

        #save onset per word.
        print('Saving timing prediction.')
        fname = open(val_files[ii][:-4]+'_align.csv','w') #jamendolyrics convention
        for i in transcript.split():
           # re.search performs regular expression operations.
           # .format inserts characters into {}.  
           # r'<string>' is considered a raw string.
           # char.start() gives you the start index of the starting character of the word (i) in transcript string
           # char.end() gives you the last index of the ending character** of the word (i) in transcript string
           # **the ending character is offset by one for the regex command, so a -1 is required to get the right 
           # index
           char = re.search(r'\b({})\b'.format(i),transcript)
           #       segments[index of character][start time of char=0]
           onset = segments[char.start()][0]
           #       segments[index of character][end time of char=1]
           term  = segments[char.end()-1][1]
           fname.write(str(onset)+','+str(term)+'\n')
        fname.close()
Example No. 10
    quartznet = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name="QuartzNet15x5Base-En")
    
    logprobs = quartznet.transcribe([filename],logprobs=True)
    
    greedy_transcript = predict_labels_greedy(alphabet,logprobs[0].cpu().numpy())

    # adapted example from here:
    # https://github.com/lumaku/ctc-segmentation
    config                              = CtcSegmentationParameters()

    #frame duration is the window of the predictions (i.e. logprobs prediction window)
    config.frame_duration_ms = 20
    #character that is intended for 'blank' - in our case, we specify the last character in alphabet.
    config.blank = len(alphabet)-1
    ground_truth_mat, utt_begin_indices = prepare_text(config,transcript,alphabet)
    
    timings, char_probs, state_list     = ctc_segmentation(config,logprobs[0].cpu().numpy(),ground_truth_mat)
    
    # Obtain list of utterances with time intervals and confidence score
    segments                            = determine_utterance_segments(config, utt_begin_indices, char_probs, timings, transcript)
    
    quartznet_transcript = quartznet.transcribe([filename])

    print('Ground Truth Transcript:',transcript)
    print('Quartznet Transcript:',quartznet_transcript[0])
    print('Quartznet Dense Sequence (greedy search):\n', greedy_transcript)
    print('CTC Segmentation Dense Sequence:\n', ''.join(state_list))

    #save onset per word.
    print('Saving timing prediction.')
Example No. 11
def ctc_align(args, device):
    """ESPnet-specific interface for CTC segmentation.

    Parses configuration, infers the CTC posterior probabilities,
    and then aligns start and end of utterances using CTC segmentation.
    Results are written to the output file given in the args.

    :param args: given configuration
    :param device: for inference; one of ['cuda', 'cpu']
    :return:  0 on success
    """
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, ASRInterface)
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=True,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    logging.info(f"Decoding device={device}")
    model.to(device=device).eval()
    # read audio and text json data
    with open(args.data_json, "rb") as f:
        js = json.load(f)["utts"]
    with open(args.utt_text, "r") as f:
        lines = f.readlines()
        i = 0
        text = {}
        segment_names = {}
        for name in js.keys():
            text_per_audio = []
            segment_names_per_audio = []
            while i < len(lines) and lines[i].startswith(name):
                text_per_audio.append(lines[i][lines[i].find(" ") + 1:])
                segment_names_per_audio.append(lines[i][:lines[i].find(" ")])
                i += 1
            text[name] = text_per_audio
            segment_names[name] = segment_names_per_audio
    # apply configuration
    config = CtcSegmentationParameters()
    if args.subsampling_factor is not None:
        config.subsampling_factor = args.subsampling_factor
    if args.frame_duration is not None:
        config.frame_duration_ms = args.frame_duration
    if args.min_window_size is not None:
        config.min_window_size = args.min_window_size
    if args.max_window_size is not None:
        config.max_window_size = args.max_window_size
    char_list = train_args.char_list
    if args.use_dict_blank:
        config.blank = char_list[0]
    logging.debug(
        f"Frame timings: {config.frame_duration_ms}ms * {config.subsampling_factor}"
    )
    # Iterate over audio files to decode and align
    for idx, name in enumerate(js.keys(), 1):
        logging.info("(%d/%d) Aligning " + name, idx, len(js.keys()))
        batch = [(name, js[name])]
        feat, label = load_inputs_and_targets(batch)
        feat = feat[0]
        with torch.no_grad():
            # Encode input frames
            enc_output = model.encode(
                torch.as_tensor(feat).to(device)).unsqueeze(0)
            # Apply ctc layer to obtain log character probabilities
            lpz = model.ctc.log_softmax(enc_output)[0].cpu().numpy()
        # Prepare the text for aligning
        ground_truth_mat, utt_begin_indices = prepare_text(
            config, text[name], char_list)
        # Align using CTC segmentation
        timings, char_probs, state_list = ctc_segmentation(
            config, lpz, ground_truth_mat)
        # Obtain list of utterances with time intervals and confidence score
        segments = determine_utterance_segments(config, utt_begin_indices,
                                                char_probs, timings,
                                                text[name])
        # Write to "segments" file
        for i, boundary in enumerate(segments):
            utt_segment = (f"{segment_names[name][i]} {name} {boundary[0]:.2f}"
                           f" {boundary[1]:.2f} {boundary[2]:.9f}\n")
            args.output.write(utt_segment)
    return 0