Example #1
from ctc_segmentation import CtcSegmentationParameters


def test_ctcsegmentationparameters():
    """Test the configuration object.
    Test repr and init.
    """
    config = CtcSegmentationParameters()
    config = eval(str(config))
    assert config.index_duration_in_seconds == 0.025
    # index_duration_in_seconds mirrors the index_duration attribute
    config.index_duration = 0.025
    assert config.index_duration_in_seconds == 0.025
    # test excluded parameters and update procedure
    config.set(char_list=["a", "»"])
    config.update_excluded_characters()
    assert "»" not in config.excluded_characters
Example #2
import numpy as np

from ctc_segmentation import (
    CtcSegmentationParameters,
    determine_utterance_segments,
)


def test_determine_utterance_segments():
    """Test the generation of segments from aligned utterances.

    This function is used after a completed CTC segmentation.
    Results are checked and compared with test vectors.
    """
    config = CtcSegmentationParameters()
    frame_duration_ms = 1000
    config.index_duration = frame_duration_ms / 1000.0
    config.score_min_mean_over_L = 2
    utt_begin_indices = [1, 4, 9]
    text = ["catzz#\n", "dogs!!\n"]
    char_probs = np.array([-0.5] * 10)
    timings = np.array(list(range(10))) + 0.5
    segments = determine_utterance_segments(config, utt_begin_indices,
                                            char_probs, timings, text)
    correct_segments = [(2.0, 4.0, -0.5), (5.0, 9.0, -0.5)]
    for i in [0, 1]:
        for j in [0, 1, 2]:
            assert segments[i][j] == correct_segments[i][j]
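
determine_utterance_segments is the last step of the pipeline shown in Example #3 below. A minimal, self-contained sketch of that pipeline (the character list, text, and uniform log-probabilities are invented, so the resulting alignment is meaningless; it only exercises the API end to end):

import numpy as np

from ctc_segmentation import (
    CtcSegmentationParameters,
    ctc_segmentation,
    determine_utterance_segments,
    prepare_text,
)

char_list = ["·", "a", "b", "c"]  # index 0 acts as the CTC blank
config = CtcSegmentationParameters(char_list=char_list)
config.index_duration = 0.04  # seconds of audio per CTC output index

text = ["abc", "cba"]
ground_truth_mat, utt_begin_indices = prepare_text(config, text)

# Dummy network output: uniform log posteriors over 100 time indices.
lpz = np.full((100, len(char_list)), np.log(1.0 / len(char_list)), dtype=np.float32)

timings, char_probs, state_list = ctc_segmentation(config, lpz, ground_truth_mat)
segments = determine_utterance_segments(
    config, utt_begin_indices, char_probs, timings, text
)
for utt, (start, end, score) in zip(text, segments):
    print(f"{utt}: {start:.2f}s - {end:.2f}s (score {score:.2f})")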
Example #3
import json
import logging

import torch
from ctc_segmentation import (
    CtcSegmentationParameters,
    ctc_segmentation,
    determine_utterance_segments,
    prepare_text,
)

from espnet.asr.pytorch_backend.asr_init import load_trained_model
from espnet.nets.asr_interface import ASRInterface
from espnet.utils.io_utils import LoadInputsAndTargets


def ctc_align(args, device):
    """ESPnet-specific interface for CTC segmentation.

    Parses configuration, infers the CTC posterior probabilities,
    and then aligns start and end of utterances using CTC segmentation.
    Results are written to the output file given in the args.

    :param args: given configuration
    :param device: for inference; one of ['cuda', 'cpu']
    :return:  0 on success
    """
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, ASRInterface)
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=True,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    logging.info(f"Decoding device={device}")
    # Warn for nets with high memory consumption on long audio files
    if hasattr(model, "enc"):
        encoder_module = model.enc.__class__.__module__
    elif hasattr(model, "encoder"):
        encoder_module = model.encoder.__class__.__module__
    else:
        encoder_module = "Unknown"
    logging.info(f"Encoder module: {encoder_module}")
    logging.info(f"CTC module:     {model.ctc.__class__.__module__}")
    if "rnn" not in encoder_module:
        logging.warning("No BLSTM model detected; memory consumption may be high.")
    model.to(device=device).eval()
    # read audio and text json data
    with open(args.data_json, "rb") as f:
        js = json.load(f)["utts"]
    with open(args.utt_text, "r", encoding="utf-8") as f:
        lines = f.readlines()
        i = 0
        text = {}
        segment_names = {}
        for name in js.keys():
            text_per_audio = []
            segment_names_per_audio = []
            while i < len(lines) and lines[i].startswith(name):
                text_per_audio.append(lines[i][lines[i].find(" ") + 1 :])
                segment_names_per_audio.append(lines[i][: lines[i].find(" ")])
                i += 1
            text[name] = text_per_audio
            segment_names[name] = segment_names_per_audio
    # apply configuration
    config = CtcSegmentationParameters()
    subsampling_factor = 1
    frame_duration_ms = 10
    if args.subsampling_factor is not None:
        subsampling_factor = args.subsampling_factor
    if args.frame_duration is not None:
        frame_duration_ms = args.frame_duration
    # Backwards compatibility to ctc_segmentation <= 1.5.3
    if hasattr(config, "index_duration"):
        config.index_duration = frame_duration_ms * subsampling_factor / 1000
    else:
        config.subsampling_factor = subsampling_factor
        config.frame_duration_ms = frame_duration_ms
    if args.min_window_size is not None:
        config.min_window_size = args.min_window_size
    if args.max_window_size is not None:
        config.max_window_size = args.max_window_size
    config.char_list = train_args.char_list
    if args.use_dict_blank is not None:
        logging.warning(
            "The option --use-dict-blank is deprecated. If needed,"
            " use --set-blank instead."
        )
    if args.set_blank is not None:
        config.blank = args.set_blank
    if args.replace_spaces_with_blanks is not None:
        config.replace_spaces_with_blanks = bool(args.replace_spaces_with_blanks)
    if args.gratis_blank:
        config.blank_transition_cost_zero = True
    if config.blank_transition_cost_zero and args.replace_spaces_with_blanks:
        logging.error(
            "Blanks are inserted between words, and also the transition cost of blank"
            " is zero. This configuration may lead to misalignments!"
        )
    if args.scoring_length is not None:
        config.score_min_mean_over_L = args.scoring_length
    logging.info(f"Frame timings: {frame_duration_ms}ms * {subsampling_factor}")
    # Iterate over audio files to decode and align
    for idx, name in enumerate(js.keys(), 1):
        logging.info("(%d/%d) Aligning " + name, idx, len(js.keys()))
        batch = [(name, js[name])]
        feat, label = load_inputs_and_targets(batch)
        feat = feat[0]
        with torch.no_grad():
            # Encode input frames
            enc_output = model.encode(torch.as_tensor(feat).to(device)).unsqueeze(0)
            # Apply ctc layer to obtain log character probabilities
            lpz = model.ctc.log_softmax(enc_output)[0].cpu().numpy()
        # Prepare the text for aligning
        ground_truth_mat, utt_begin_indices = prepare_text(config, text[name])
        # Align using CTC segmentation
        timings, char_probs, state_list = ctc_segmentation(
            config, lpz, ground_truth_mat
        )
        logging.debug(f"state_list = {state_list}")
        # Obtain list of utterances with time intervals and confidence score
        segments = determine_utterance_segments(
            config, utt_begin_indices, char_probs, timings, text[name]
        )
        # Write to "segments" file
        for i, boundary in enumerate(segments):
            utt_segment = (
                f"{segment_names[name][i]} {name} {boundary[0]:.2f}"
                f" {boundary[1]:.2f} {boundary[2]:.9f}\n"
            )
            args.output.write(utt_segment)
    return 0
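
For reference, each line written to args.output follows the Kaldi segments-file convention (segment name, recording name, start time and end time in seconds), with the per-utterance confidence score appended. An illustrative line (values made up):

utt_0001 recording1 0.28 2.56 -0.113716086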
Example #4
import os

import numpy as np
import tensorflow as tf
from ctc_segmentation import CtcSegmentationParameters
from tqdm import tqdm

# `checkpoint`, `model`, `config`, and `dataset` are defined earlier in the
# original script; this excerpt starts at checkpoint restoration.
manager_training = tf.train.CheckpointManager(checkpoint, str(config.weights_dir / 'latest'),
                                              max_to_keep=1, checkpoint_name='latest')

checkpoint.restore(manager_training.latest_checkpoint)
if manager_training.latest_checkpoint:
    print(f'\nresuming training from step {model.step} ({manager_training.latest_checkpoint})')
else:
    print('\nstarting training from scratch')

all_durations = np.array([])
iterator = tqdm(enumerate(dataset.all_batches()))
step = 0

char_list = [''] + list(model.text_pipeline.tokenizer.idx_to_token.values())
smt_config = CtcSegmentationParameters(char_list=char_list)
smt_config.index_duration = 0.0115545
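# The value above is presumably the duration in seconds covered by one
# CTC output index, i.e. the feature hop size divided by the sample rate
# (times any model downsampling); the exact number depends on this
# dataset's preprocessing and is not derivable from the snippet.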

labelFile = open(r'/root/mydata/Corpus/transformer_tts_data.corpus/phonemized_metadata.NoStress2.txt', 'w', encoding='utf-8')

for c, (spk_name_batch, mel_batch, phoneme_batch, mel_len_batch, phon_len_batch, fname_batch) in iterator:
    iterator.set_description('Processing dataset')

    model_out = model.predict(mel_batch)
    pred_phon = model_out['encoder_output']
    pred_phon = tf.nn.log_softmax(pred_phon)

    for i, name in enumerate(fname_batch):
        os.makedirs(os.path.join(config.duration_dir, spk_name_batch[i].numpy().decode()), exist_ok=True)

        text = list(phoneme_batch[i][:phon_len_batch[i]].numpy())
        while 358 in text: