Esempio n. 1
0
def one_sample(sample):
    """Convert one (mp3_filename, transcript) sample to WAV and validate it.

    Returns a (counter, rows) tuple: counter tallies the sample into one of
    the exclusion buckets (plus "all"/"total_time"), and rows holds at most
    one (wav_filename, file_size, label) entry when the sample is usable.
    """
    mp3_filename = sample[0]
    # Storing wav files next to the mp3 ones - just with a different suffix
    wav_filename = path.splitext(mp3_filename)[0] + ".wav"
    _maybe_convert_wav(mp3_filename, wav_filename)
    # Probe the WAV only when conversion actually produced a file: calling
    # soxi unconditionally would raise CalledProcessError on a missing file
    # and bypass the "failed" accounting below.
    file_size = -1
    frames = 0
    if os.path.exists(wav_filename):
        file_size = path.getsize(wav_filename)
        frames = int(
            subprocess.check_output(["soxi", "-s", wav_filename],
                                    stderr=subprocess.STDOUT))
    label = validate_label(sample[1])
    rows = []
    counter = get_counter()
    if file_size == -1:
        # Excluding samples that failed upon conversion
        counter["failed"] += 1
    elif label is None:
        # Excluding samples that failed on label validation
        counter["invalid_label"] += 1
    elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)):
        # Excluding samples that are too short to fit the transcript
        counter["too_short"] += 1
    elif frames / SAMPLE_RATE > MAX_SECS:
        # Excluding very long samples to keep a reasonable batch-size
        counter["too_long"] += 1
    else:
        # This one is good - keep it for the target CSV
        rows.append((wav_filename, file_size, label))
    counter["all"] += 1
    counter["total_time"] += frames
    return (counter, rows)
def one_sample(sample):
    """ Take a audio file, and optionally convert it to 16kHz WAV """
    wav_filename = sample[0]
    file_size = -1
    frames = 0
    if os.path.exists(wav_filename):
        file_size = os.path.getsize(wav_filename)
        soxi_output = subprocess.check_output(
            ["soxi", "-s", wav_filename], stderr=subprocess.STDOUT)
        frames = int(soxi_output)
    label = label_filter(sample[1])
    counter = get_counter()
    rows = []
    if file_size == -1:
        # No WAV was produced - count the sample as failed
        counter["failed"] += 1
    elif label is None:
        # Transcript was rejected by the label filter
        counter["invalid_label"] += 1
    elif int(frames / SAMPLE_RATE * 1000 / 15 / 2) < len(str(label)):
        # Audio too short to plausibly contain the transcript
        counter["too_short"] += 1
    elif frames / SAMPLE_RATE > MAX_SECS:
        # Overly long clips are dropped to keep batch sizes reasonable
        counter["too_long"] += 1
    else:
        # Sample passed every check - keep it for the target CSV
        rows.append((wav_filename, file_size, label))
    counter["all"] += 1
    counter["total_time"] += frames

    return (counter, rows)
Esempio n. 3
0
def one_sample(sample):
    """Cut one labelled segment out of a source audio file and validate it.

    sample is (audio_source, target_dir, dataset_basename, start_time,
    duration, label, sample_id).  Returns (counter, rows) where rows holds
    at most one (relative wav path, file size, label) tuple.
    """
    file_size = -1
    frames = 0

    audio_source = sample[0]
    target_dir = sample[1]
    dataset_basename = sample[2]

    start_time = sample[3]
    duration = sample[4]
    label = label_filter_fun(sample[5])
    sample_id = sample[6]

    _wav_filename = os.path.basename(
        audio_source.replace(".wav", "_{:06}.wav".format(sample_id)))
    wav_fullname = os.path.join(target_dir, dataset_basename, _wav_filename)

    if not os.path.exists(wav_fullname):
        subprocess.check_output([
            "ffmpeg", "-i", audio_source, "-ss",
            str(start_time), "-t",
            str(duration), "-c", "copy", wav_fullname
        ],
                                stdin=subprocess.DEVNULL,
                                stderr=subprocess.STDOUT)

    # Probe the segment only if it actually exists: calling getsize/soxi
    # unconditionally would raise on a missing file, making the "failed"
    # branch below (file_size == -1) unreachable.
    if os.path.exists(wav_fullname):
        file_size = os.path.getsize(wav_fullname)
        frames = int(
            subprocess.check_output(["soxi", "-s", wav_fullname],
                                    stderr=subprocess.STDOUT))

    _counter = get_counter()
    _rows = []

    if file_size == -1:
        # Excluding samples that failed upon conversion
        _counter["failed"] += 1
    elif label is None:
        # Excluding samples that failed on label validation
        _counter["invalid_label"] += 1
    elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)):
        # Excluding samples that are too short to fit the transcript
        _counter["too_short"] += 1
    elif frames / SAMPLE_RATE < MIN_SECS:
        # Excluding samples that are too short
        _counter["too_short"] += 1
    elif frames / SAMPLE_RATE > MAX_SECS:
        # Excluding very long samples to keep a reasonable batch-size
        _counter["too_long"] += 1
    else:
        # This one is good - keep it for the target CSV
        _rows.append((os.path.join(dataset_basename,
                                   _wav_filename), file_size, label))
        _counter["imported_time"] += frames
    _counter["all"] += 1
    _counter["total_time"] += frames

    return (_counter, _rows)
Esempio n. 4
0
def _maybe_convert_set(input_tsv, audio_dir, space_after_every_character=None):
    """Convert one TSV split to a DeepSpeech-formatted CSV.

    Reads (path, sentence) pairs from input_tsv, validates/converts each
    sample through one_sample in a worker pool, and writes the surviving
    rows to a CSV placed next to the audio files.
    """
    # Replace only the extension: a plain str.replace("tsv", "csv") would
    # also rewrite "tsv" occurring elsewhere in the file name.
    tsv_basename = os.path.split(input_tsv)[-1]
    output_csv = os.path.join(audio_dir,
                              os.path.splitext(tsv_basename)[0] + ".csv")
    print("Saving new DeepSpeech-formatted CSV file to: ", output_csv)

    # Get audiofile path and transcript for each sentence in tsv
    samples = []
    with open(input_tsv, encoding="utf-8") as input_tsv_file:
        reader = csv.DictReader(input_tsv_file, delimiter="\t")
        for row in reader:
            samples.append((os.path.join(audio_dir,
                                         row["path"]), row["sentence"]))

    counter = get_counter()
    num_samples = len(samples)
    rows = []

    print("Importing mp3 files...")
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, processed in enumerate(pool.imap_unordered(one_sample, samples),
                                  start=1):
        counter += processed[0]
        rows += processed[1]
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(output_csv, "w", encoding="utf-8") as output_csv_file:
        print("Writing CSV file for DeepSpeech.py as: ", output_csv)
        writer = csv.DictWriter(output_csv_file, fieldnames=FIELDNAMES)
        writer.writeheader()
        bar = progressbar.ProgressBar(max_value=len(rows), widgets=SIMPLE_BAR)
        for filename, file_size, transcript in bar(rows):
            if space_after_every_character:
                # Space-separate characters when requested (presumably for
                # scripts without word spacing - confirm with callers)
                writer.writerow({
                    "wav_filename": filename,
                    "wav_filesize": file_size,
                    "transcript": " ".join(transcript),
                })
            else:
                writer.writerow({
                    "wav_filename": filename,
                    "wav_filesize": file_size,
                    "transcript": transcript,
                })

    imported_samples = get_imported_samples(counter)
    assert counter["all"] == num_samples
    assert len(rows) == imported_samples

    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
Esempio n. 5
0
def _maybe_convert_set(extracted_dir, source_csv, target_csv):
    """Import one CSV split unless its target CSV already exists."""
    print()
    if os.path.exists(target_csv):
        print('Found CSV file "%s" - not importing "%s".' %
              (target_csv, source_csv))
        return
    print('No CSV file "%s" - importing "%s"...' % (target_csv, source_csv))
    with open(source_csv) as source_csv_file:
        samples = [(os.path.join(extracted_dir, entry["filename"]),
                    entry["text"])
                   for entry in csv.DictReader(source_csv_file)]

    # Mutable counters for the concurrent embedded routine
    counter = get_counter()
    num_samples = len(samples)
    rows = []

    print("Importing mp3 files...")
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for done, (sample_counter, sample_rows) in enumerate(
            pool.imap_unordered(one_sample, samples), start=1):
        counter += sample_counter
        rows += sample_rows
        bar.update(done)
    bar.update(num_samples)
    pool.close()
    pool.join()

    print('Writing "%s"...' % target_csv)
    with open(target_csv, "w", encoding="utf-8",
              newline="") as target_csv_file:
        writer = csv.DictWriter(target_csv_file, fieldnames=FIELDNAMES)
        writer.writeheader()
        bar = progressbar.ProgressBar(max_value=len(rows), widgets=SIMPLE_BAR)
        for filename, file_size, transcript in bar(rows):
            writer.writerow({
                "wav_filename": filename,
                "wav_filesize": file_size,
                "transcript": transcript,
            })

    imported_samples = get_imported_samples(counter)
    assert counter["all"] == num_samples
    assert len(rows) == imported_samples

    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
Esempio n. 6
0
def one_sample(sample):
    """ Take an audio file, and optionally convert it to 16kHz WAV """
    global AUDIO_DIR
    source_filename = sample[0]
    if os.path.splitext(source_filename.lower())[1] != ".wav":
        source_filename += ".wav"
    # Storing wav files next to the mp3 ones - just with a different suffix
    output_filename = f"{sample[2]}.wav"
    output_filepath = os.path.join(AUDIO_DIR, output_filename)
    _maybe_convert_wav(source_filename, output_filepath)
    file_size = -1
    frames = 0
    if os.path.exists(output_filepath):
        file_size = os.path.getsize(output_filepath)
        # A zero-byte file carries no audio, so skip the soxi probe
        if file_size != 0:
            frames = int(
                subprocess.check_output(["soxi", "-s", output_filepath],
                                        stderr=subprocess.STDOUT))
    label = FILTER_OBJ.filter(sample[1])
    rows = []
    counter = get_counter()
    if file_size == -1:
        # Conversion failed - no output file exists
        counter["failed"] += 1
    elif label is None:
        # Transcript was rejected by the filter
        counter["invalid_label"] += 1
    # + 1 added for filtering surname dataset with too short audio files
    elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)) + 1:
        # Audio too short to plausibly contain the transcript
        counter["too_short"] += 1
    elif frames / SAMPLE_RATE > MAX_SECS:
        # Very long samples are dropped to keep a reasonable batch-size
        counter["too_long"] += 1
    else:
        # Sample passed every check - keep it for the target CSV
        rows.append(
            (os.path.split(output_filename)[-1], file_size, label, sample[2]))
        counter["imported_time"] += frames
    counter["all"] += 1
    counter["total_time"] += frames

    return (counter, rows)
Esempio n. 7
0
def one_sample(args):
    """ Take an audio file, and optionally convert it to 16kHz WAV """
    sample, filter_obj = args
    mp3_filename = sample[0]
    if os.path.splitext(mp3_filename.lower())[1] != ".mp3":
        mp3_filename += ".mp3"
    # Storing wav files next to the mp3 ones - just with a different suffix
    wav_filename = os.path.splitext(mp3_filename)[0] + ".wav"
    _maybe_convert_wav(mp3_filename, wav_filename)
    file_size = -1
    frames = 0
    if os.path.exists(wav_filename):
        file_size = os.path.getsize(wav_filename)
        soxi_output = subprocess.check_output(
            ["soxi", "-s", wav_filename], stderr=subprocess.STDOUT)
        frames = int(soxi_output)
    label = filter_obj.filter(sample[1])
    rows = []
    counter = get_counter()
    if file_size == -1:
        # Conversion failed - no WAV was produced
        counter["failed"] += 1
    elif label is None:
        # Transcript was rejected by the filter
        counter["invalid_label"] += 1
    elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)):
        # Audio too short to plausibly contain the transcript
        counter["too_short"] += 1
    elif frames / SAMPLE_RATE > MAX_SECS:
        # Very long samples are dropped to keep a reasonable batch-size
        counter["too_long"] += 1
    else:
        # Sample passed every check - keep it for the target CSV
        rows.append(
            (os.path.split(wav_filename)[-1], file_size, label, sample[2]))
        counter["imported_time"] += frames
    counter["all"] += 1
    counter["total_time"] += frames

    return (counter, rows)
Esempio n. 8
0
def one_sample(sample):
    """ Take a audio file, and optionally convert it to 16kHz WAV """
    orig_filename = sample["path"]
    # Storing wav files next to the wav ones - just with a different suffix
    wav_filename = os.path.splitext(orig_filename)[0] + ".converted.wav"
    _maybe_convert_wav(orig_filename, wav_filename)
    file_size = -1
    frames = 0
    if os.path.exists(wav_filename):
        file_size = os.path.getsize(wav_filename)
        soxi_output = subprocess.check_output(
            ["soxi", "-s", wav_filename], stderr=subprocess.STDOUT)
        frames = int(soxi_output)
    label = sample["text"]

    rows = []

    # Keep track of how many samples are good vs. problematic
    counter = get_counter()
    if file_size == -1:
        # Conversion never produced a WAV file
        counter["failed"] += 1
    elif label is None:
        # No usable transcript for this sample
        counter["invalid_label"] += 1
    elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)):
        # Audio too short to plausibly contain the transcript
        counter["too_short"] += 1
    elif frames / SAMPLE_RATE > MAX_SECS:
        # Very long samples are dropped to keep a reasonable batch-size
        counter["too_long"] += 1
    else:
        # Sample passed every check - keep it for the target CSV
        rows.append((wav_filename, file_size, label))
    counter["all"] += 1
    counter["total_time"] += frames

    return (counter, rows)
def _maybe_convert_sets(target_dir, extracted_data):
    """Match WAV files with transcripts and write 80/10/10 CSV splits.

    Transcripts are gathered from a fixed list of transcript files, keyed
    by audio basename, matched against WAVs found under the extracted
    directory, validated through one_sample in a worker pool, and finally
    distributed into train/dev/test CSVs.
    """
    extracted_dir = os.path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(
        target_dir, ARCHIVE_DIR_NAME,
        ARCHIVE_NAME.replace(".tar.gz", "_{}.csv"))
    # The unformatted template still contains the literal "{}" placeholder
    # and can never name an existing file - check a concrete output instead.
    if os.path.isfile(target_csv_template.format("train")):
        return

    wav_root_dir = os.path.join(extracted_dir)

    all_files = [
        "transcripts/train/yaounde/fn_text.txt",
        "transcripts/train/ca16_conv/transcripts.txt",
        "transcripts/train/ca16_read/conditioned.txt",
        "transcripts/dev/niger_west_african_fr/transcripts.txt",
        "speech/dev/niger_west_african_fr/niger_wav_file_name_transcript.tsv",
        "transcripts/devtest/ca16_read/conditioned.txt",
        "transcripts/test/ca16/prompts.txt",
    ]

    transcripts = {}
    for tr in all_files:
        with open(os.path.join(target_dir, ARCHIVE_DIR_NAME, tr),
                  "r") as tr_source:
            for line in tr_source.readlines():
                line = line.strip()

                # TSV listings are tab-separated, the rest space-separated
                if ".tsv" in tr:
                    sep = "	"
                else:
                    sep = " "

                audio = os.path.basename(line.split(sep)[0])

                # Normalize the audio key to a .wav basename
                if ".wav" not in audio:
                    if ".tdf" in audio:
                        audio = audio.replace(".tdf", ".wav")
                    else:
                        audio += ".wav"

                transcript = " ".join(line.split(sep)[1:])
                transcripts[audio] = transcript

    # Get audiofile path and transcript for each sentence in tsv
    samples = []
    glob_dir = os.path.join(wav_root_dir, "**/*.wav")
    for record in glob(glob_dir, recursive=True):
        record_file = os.path.basename(record)
        if record_file in transcripts:
            samples.append((record, transcripts[record_file]))

    # Keep track of how many samples are good vs. problematic
    counter = get_counter()
    num_samples = len(samples)
    rows = []

    print("Importing WAV files...")
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, processed in enumerate(pool.imap_unordered(one_sample, samples),
                                  start=1):
        counter += processed[0]
        rows += processed[1]
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format("train"),
              "w") as train_csv_file:  # 80%
        with open(target_csv_template.format("dev"),
                  "w") as dev_csv_file:  # 10%
            with open(target_csv_template.format("test"),
                      "w") as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file,
                                              fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file,
                                            fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file,
                                             fieldnames=FIELDNAMES)
                test_writer.writeheader()

                for i, item in enumerate(rows):
                    transcript = validate_label(item[2])
                    if not transcript:
                        continue
                    wav_filename = item[0]
                    # Deterministic 80/10/10 split keyed on the row index
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(
                        dict(
                            wav_filename=wav_filename,
                            wav_filesize=os.path.getsize(wav_filename),
                            transcript=transcript,
                        ))

    imported_samples = get_imported_samples(counter)
    assert counter["all"] == num_samples
    assert len(rows) == imported_samples

    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
Esempio n. 10
0
def _maybe_convert_set(dataset,
                       tsv_dir,
                       audio_dir,
                       filter_obj,
                       space_after_every_character=None,
                       rows=None,
                       exclude=None):
    """Import one dataset split and write its DeepSpeech-formatted CSV.

    When `rows` is None, the split's TSV is loaded and every sample is
    validated/converted through one_sample in a worker pool; otherwise the
    precomputed `rows` are reused as-is.  Rows whose transcript or speaker
    appears in `exclude` are skipped when writing the CSV.  Returns the rows.
    """
    # Collect transcripts and speakers to blacklist from the excluded rows.
    # NOTE(review): assumes sample[2] is the transcript and sample[3] the
    # speaker id, matching the 4-tuples unpacked in the writer loop below -
    # confirm against one_sample's row format.
    exclude_transcripts = set()
    exclude_speakers = set()
    if exclude is not None:
        for sample in exclude:
            exclude_transcripts.add(sample[2])
            exclude_speakers.add(sample[3])

    if rows is None:
        rows = []
        input_tsv = os.path.join(os.path.abspath(tsv_dir), dataset + ".tsv")
        if not os.path.isfile(input_tsv):
            return rows
        print("Loading TSV file: ", input_tsv)
        # Get audiofile path and transcript for each sentence in tsv
        samples = []
        with open(input_tsv, encoding="utf-8") as input_tsv_file:
            reader = csv.DictReader(input_tsv_file, delimiter="\t")
            for row in reader:
                samples.append((os.path.join(audio_dir, row["path"]),
                                row["sentence"], row["client_id"]))

        counter = get_counter()
        num_samples = len(samples)

        print("Importing mp3 files...")
        pool = Pool()
        bar = progressbar.ProgressBar(max_value=num_samples,
                                      widgets=SIMPLE_BAR)
        # zip_longest against an empty iterable pairs every sample with the
        # fillvalue, yielding (sample, filter_obj) tuples for one_sample.
        samples_with_context = itertools.zip_longest(samples, [],
                                                     fillvalue=filter_obj)
        for i, processed in enumerate(pool.imap_unordered(
                one_sample, samples_with_context),
                                      start=1):
            # processed is (counter, rows) from one worker invocation
            counter += processed[0]
            rows += processed[1]
            bar.update(i)
        bar.update(num_samples)
        pool.close()
        pool.join()

        imported_samples = get_imported_samples(counter)
        assert counter["all"] == num_samples
        assert len(rows) == imported_samples
        print_import_report(counter, SAMPLE_RATE, MAX_SECS)

    output_csv = os.path.join(os.path.abspath(audio_dir), dataset + ".csv")
    print("Saving new DeepSpeech-formatted CSV file to: ", output_csv)
    with open(output_csv, "w", encoding="utf-8") as output_csv_file:
        print("Writing CSV file for DeepSpeech.py as: ", output_csv)
        writer = csv.DictWriter(output_csv_file, fieldnames=FIELDNAMES)
        writer.writeheader()
        bar = progressbar.ProgressBar(max_value=len(rows), widgets=SIMPLE_BAR)
        for filename, file_size, transcript, speaker in bar(rows):
            # Skip rows overlapping the excluded transcript/speaker sets
            if transcript in exclude_transcripts or speaker in exclude_speakers:
                continue
            if space_after_every_character:
                # Space-separate every character when requested (presumably
                # for scripts without word spacing - confirm with callers)
                writer.writerow({
                    "wav_filename": filename,
                    "wav_filesize": file_size,
                    "transcript": " ".join(transcript),
                })
            else:
                writer.writerow({
                    "wav_filename": filename,
                    "wav_filesize": file_size,
                    "transcript": transcript,
                })
    return rows
def _maybe_convert_sets(target_dir, extracted_data):
    """Import OGG recordings and write 80/10/10 train/dev/test CSV splits."""
    extracted_dir = os.path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(
        target_dir,
        ARCHIVE_DIR_NAME + "_" + ARCHIVE_NAME.replace(".zip", "_{}.csv"))
    # The unformatted template still contains the literal "{}" placeholder
    # and can never name an existing file - check a concrete output instead.
    if os.path.isfile(target_csv_template.format("train")):
        return

    ogg_root_dir = os.path.join(extracted_dir,
                                ARCHIVE_NAME.replace(".zip", ""))

    # Get audiofile path and transcript for each sentence in tsv
    samples = []
    glob_dir = os.path.join(ogg_root_dir, "**/*.ogg")
    for record in glob(glob_dir, recursive=True):
        record_file = record.replace(ogg_root_dir + os.path.sep, "")
        if record_filter(record_file):
            samples.append((
                os.path.join(ogg_root_dir, record_file),
                os.path.splitext(os.path.basename(record_file))[0],
            ))

    counter = get_counter()
    num_samples = len(samples)
    rows = []

    print("Importing ogg files...")
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, processed in enumerate(pool.imap_unordered(one_sample, samples),
                                  start=1):
        counter += processed[0]
        rows += processed[1]
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format("train"),
              "w") as train_csv_file:  # 80%
        with open(target_csv_template.format("dev"),
                  "w") as dev_csv_file:  # 10%
            with open(target_csv_template.format("test"),
                      "w") as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file,
                                              fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file,
                                            fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file,
                                             fieldnames=FIELDNAMES)
                test_writer.writeheader()

                for i, item in enumerate(rows):
                    transcript = validate_label(item[2])
                    if not transcript:
                        continue
                    wav_filename = os.path.join(
                        ogg_root_dir, item[0].replace(".ogg", ".wav"))
                    # Deterministic 80/10/10 split keyed on the row index
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(
                        dict(
                            wav_filename=wav_filename,
                            wav_filesize=os.path.getsize(wav_filename),
                            transcript=transcript,
                        ))

    imported_samples = get_imported_samples(counter)
    assert counter["all"] == num_samples
    assert len(rows) == imported_samples

    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
Esempio n. 12
0
def _maybe_import_data(xml_file, audio_source, target_dir, rel_tol=1e-1):
    """Segment one audio source using its XML transcript and import it.

    Consecutive XML "row" elements whose start timestamps are close enough
    (math.isclose with rel_tol) are merged into a single sample as long as
    the merged duration stays under MAX_SECS.  The resulting segments are
    converted and validated by one_sample in a worker pool.

    Returns a (_counter, _rows) tuple of import statistics and kept rows.
    """
    dataset_basename = os.path.splitext(os.path.split(xml_file)[1])[0]
    wav_root = os.path.join(target_dir, dataset_basename)
    if not os.path.exists(wav_root):
        os.makedirs(wav_root)

    # Total length of the source audio, in frames, as reported by soxi
    source_frames = int(
        subprocess.check_output(["soxi", "-s", audio_source],
                                stderr=subprocess.STDOUT))
    print("Source audio length: %s" %
          secs_to_hours(source_frames / SAMPLE_RATE))

    # Get audiofile path and transcript for each sentence in tsv
    samples = []
    tree = ET.parse(xml_file)
    root = tree.getroot()
    seq_id = 0
    this_time = 0.0
    this_duration = 0.0
    prev_time = 0.0
    prev_duration = 0.0
    this_text = ""
    for child in root:
        if child.tag == "row":
            cur_time = float(child.attrib["timestamp"])
            cur_duration = float(child.attrib["timedur"])
            # NOTE(review): child.text can be None for empty elements, which
            # would make `this_text += cur_text` below raise - confirm the
            # XML never contains empty <row> elements.
            cur_text = child.text

            if this_time == 0.0:
                this_time = cur_time

            # Gap between the end of the previous row and the current one
            delta = cur_time - (prev_time + prev_duration)
            # rel_tol value is made from trial/error to try and compromise between:
            # - cutting enough to skip missing words
            # - not too short, not too long sentences
            is_close = math.isclose(cur_time,
                                    this_time + this_duration,
                                    rel_tol=rel_tol)
            is_short = ((this_duration + cur_duration + delta) < MAX_SECS)

            # when the previous element is close enough **and** this does not
            # go over MAX_SECS, we append content
            if (is_close and is_short):
                this_duration += cur_duration + delta
                this_text += cur_text
            else:
                # Flush the accumulated segment and start a new one
                samples.append((audio_source, target_dir, dataset_basename,
                                this_time, this_duration, this_text, seq_id))

                this_time = cur_time
                this_duration = cur_duration
                this_text = cur_text

                seq_id += 1

            prev_time = cur_time
            prev_duration = cur_duration

    # Keep track of how many samples are good vs. problematic
    _counter = get_counter()
    num_samples = len(samples)
    _rows = []

    print("Processing XML data: {}".format(xml_file))
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, processed in enumerate(pool.imap_unordered(one_sample, samples),
                                  start=1):
        # processed is (counter, rows) from one worker invocation
        _counter += processed[0]
        _rows += processed[1]
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    imported_samples = get_imported_samples(_counter)
    assert _counter["all"] == num_samples
    assert len(_rows) == imported_samples

    print_import_report(_counter, SAMPLE_RATE, MAX_SECS)
    print("Import efficiency: %.1f%%" %
          ((_counter["total_time"] / source_frames) * 100))
    print("")

    return _counter, _rows
Esempio n. 13
0
            label = unicodedata.normalize("NFKD", label.strip()) \
                .encode("ascii", "ignore") \
                .decode("ascii", "ignore")
        label = maybe_normalize(label)
        label = validate_label(label)
        if ALPHABET and label:
            try:
                ALPHABET.encode(label)
            except KeyError:
                label = None
        return label

    # Fetch the dataset release listing, then convert each referenced
    # (XML transcript, MP3 audio) pair via _maybe_import_data.
    dataset_sources = _download_and_preprocess_data(
        csv_url=DATASET_RELEASE_CSV, target_dir=PARAMS.target_dir)
    sources_root_dir = os.path.dirname(dataset_sources)
    all_counter = get_counter()
    all_rows = []
    with open(dataset_sources, "r") as sources:
        for line in sources.readlines():
            # Each listing line: <xml path>,<mp3 path>,<rel tolerance>
            d = line.split(",")
            this_xml = os.path.join(sources_root_dir, d[0])
            this_mp3 = os.path.join(sources_root_dir, d[1])
            this_rel = float(d[2])

            # Convert the MP3 to a WAV sibling before segmentation
            wav_filename = os.path.join(
                sources_root_dir,
                os.path.splitext(os.path.basename(this_mp3))[0] + ".wav")
            _maybe_convert_wav(this_mp3, wav_filename)
            # NOTE(review): counter/rows are not folded into
            # all_counter/all_rows in the visible code - confirm downstream.
            counter, rows = _maybe_import_data(this_xml, wav_filename,
                                               sources_root_dir, this_rel)
Esempio n. 14
0
def _maybe_convert_sets(target_dir, extracted_data, english_compatible=False):
    """Import pre-segmented WAV data and write 80/10/10 CSV splits."""
    extracted_dir = os.path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(target_dir, "ts_" + ARCHIVE_NAME + "_{}.csv")
    # The unformatted template still contains the literal "{}" placeholder
    # and can never name an existing file - check a concrete output instead.
    if os.path.isfile(target_csv_template.format("train")):
        return
    path_to_original_csv = os.path.join(extracted_dir, "data.csv")
    # Pre-filter samples already known to exceed MAX_SECS from the metadata
    with open(path_to_original_csv) as csv_f:
        data = [
            d
            for d in csv.DictReader(csv_f, delimiter=",")
            if float(d["duration"]) <= MAX_SECS
        ]

    for line in data:
        line["path"] = os.path.join(extracted_dir, line["path"])

    num_samples = len(data)
    rows = []
    counter = get_counter()

    print("Importing {} wav files...".format(num_samples))
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, processed in enumerate(pool.imap_unordered(one_sample, data), start=1):
        counter += processed[0]
        rows += processed[1]
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format("train"), "w") as train_csv_file:  # 80%
        with open(target_csv_template.format("dev"), "w") as dev_csv_file:  # 10%
            with open(target_csv_template.format("test"), "w") as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
                test_writer.writeheader()

                for i, item in enumerate(rows):
                    transcript = validate_label(
                        cleanup_transcript(
                            item[2], english_compatible=english_compatible
                        )
                    )
                    if not transcript:
                        continue
                    wav_filename = os.path.join(target_dir, extracted_data, item[0])
                    # Deterministic 80/10/10 split keyed on the row index
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(
                        dict(
                            wav_filename=wav_filename,
                            wav_filesize=os.path.getsize(wav_filename),
                            transcript=transcript,
                        )
                    )

    imported_samples = get_imported_samples(counter)
    assert counter["all"] == num_samples
    assert len(rows) == imported_samples

    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
Esempio n. 15
0
def _maybe_convert_sets(target_dir, extracted_data):
    """Build train/dev/test CSV files from every metadata.csv found under
    the extracted archive directory.

    Samples are validated/converted in parallel through ``one_sample`` and
    then split deterministically 80/10/10 (row index modulo 10) into the
    train, dev and test CSVs written next to the downloaded archive.

    Args:
        target_dir: root directory the archive was downloaded into.
        extracted_data: sub-directory name of the extracted archive.
    """
    extracted_dir = os.path.join(target_dir, extracted_data)
    target_csv_template = os.path.join(target_dir, ARCHIVE_DIR_NAME,
                                       ARCHIVE_NAME.replace(".tgz", "_{}.csv"))
    # Fix: the original tested isfile() on the template path itself, which
    # still contains the "{}" placeholder, so the early-exit never fired and
    # the CSVs were regenerated on every run.  Check the concrete files.
    if all(os.path.isfile(target_csv_template.format(split))
           for split in ("train", "dev", "test")):
        return

    wav_root_dir = extracted_dir

    # Collect (audio_path, transcript) pairs from each metadata.csv,
    # skipping any record whose path matches an entry of SKIP_LIST.
    samples = []
    glob_dir = os.path.join(wav_root_dir, "**/metadata.csv")
    for record in glob(glob_dir, recursive=True):
        if any(skip in record for skip in SKIP_LIST):
            continue
        with open(record, "r", encoding="utf-8") as rec:
            for line in rec:
                fields = line.strip().split("|")
                # Guard against blank/malformed lines that would otherwise
                # raise IndexError on fields[2].
                if len(fields) < 3:
                    continue
                audio = os.path.join(os.path.dirname(record), "wavs",
                                     fields[0] + ".wav")
                # fields[2] is the normalized transcript column.
                samples.append((audio, fields[2]))

    counter = get_counter()
    num_samples = len(samples)
    rows = []

    print("Importing WAV files...")
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, processed in enumerate(pool.imap_unordered(one_sample, samples),
                                  start=1):
        counter += processed[0]
        rows += processed[1]
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format("train"), "w", encoding="utf-8",
              newline="") as train_csv_file, \
         open(target_csv_template.format("dev"), "w", encoding="utf-8",
              newline="") as dev_csv_file, \
         open(target_csv_template.format("test"), "w", encoding="utf-8",
              newline="") as test_csv_file:
        train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
        train_writer.writeheader()
        dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
        dev_writer.writeheader()
        test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
        test_writer.writeheader()

        for i, item in enumerate(rows):
            transcript = validate_label(item[2])
            if not transcript:
                continue
            wav_filename = item[0]
            # Deterministic 80/10/10 split: rows 0,10,20,... go to test,
            # rows 1,11,21,... to dev, everything else to train.
            i_mod = i % 10
            if i_mod == 0:
                writer = test_writer
            elif i_mod == 1:
                writer = dev_writer
            else:
                writer = train_writer
            writer.writerow(
                dict(
                    wav_filename=os.path.relpath(wav_filename, extracted_dir),
                    wav_filesize=os.path.getsize(wav_filename),
                    transcript=transcript,
                ))

    imported_samples = get_imported_samples(counter)
    assert counter["all"] == num_samples
    assert len(rows) == imported_samples

    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
Esempio n. 16
0
def _maybe_convert_set(dataset_dir,
                       audio_dir,
                       filter_obj,
                       space_after_every_character=None,
                       rows=None):
    """Convert a dataset of ``.data`` transcript-list files into a single
    DeepSpeech-formatted ``train.csv``.

    Args:
        dataset_dir: root directory walked for ``*.data`` transcript files;
            each entry maps a wav file name to its transcript.
        audio_dir: directory the output ``train.csv`` is written into.
        filter_obj: unused in this function; kept for interface
            compatibility with callers.
        space_after_every_character: if truthy, write the transcript with a
            space between every character (for character-based languages).
        rows: pre-computed ``(filename, file_size, transcript, speaker)``
            rows; when None, samples are imported (and validated in
            parallel) from ``dataset_dir``.

    Returns:
        The list of rows that were written to the CSV.
    """
    matched = dict()
    for subdir, dirs, files in os.walk(dataset_dir):
        for name in files:
            if not name.endswith(".data"):
                continue
            data_path = os.path.join(subdir, name)
            wav_folder = os.path.join(os.path.dirname(subdir), "wav")
            file_dict = dict()
            # Fix: use a context manager so the handle is closed even on an
            # exception (the original used open()/close() and shadowed the
            # `file` builtin / walk loop variable).
            with open(data_path, mode="r") as data_file:
                for row in data_file:
                    if row.isspace():
                        continue
                    parts = row.replace("\n", "").replace(
                        " wav ", ".wav  ").split(" ", 1)
                    if len(parts) != 2:
                        continue
                    file_name, transcript = parts
                    # Names with no extension default to .wav; existing
                    # .wav/.mp3 (or other dotted) names are kept as-is.
                    if "." not in file_name:
                        file_name += ".wav"
                    # Strip a single leading slash so the join below stays
                    # relative to dataset_dir.
                    if file_name.startswith("/"):
                        file_name = file_name[1:]
                    file_name = os.path.join(dataset_dir, file_name)
                    file_dict[file_name] = convert_transcript(transcript)

            # Keep only entries whose wav file actually exists on disk.
            for wav_subdir, wav_dirs, wav_files in os.walk(wav_folder):
                for wav_file in wav_files:
                    wav_file_path = os.path.join(wav_subdir, wav_file)
                    if wav_file_path in file_dict:
                        matched[wav_file_path] = file_dict[wav_file_path]

    # One synthetic speaker id per sample, assigned in insertion order.
    samples = [(path, transcript, speaker)
               for speaker, (path, transcript)
               in enumerate(matched.items(), start=1)]
    del matched

    if rows is None:
        rows = []
        counter = get_counter()
        num_samples = len(samples)
        print("Importing dataset files...")
        pool = Pool(initializer=init_worker, initargs=(PARAMS, ))
        bar = progressbar.ProgressBar(max_value=num_samples,
                                      widgets=SIMPLE_BAR)
        for i, processed in enumerate(pool.imap_unordered(one_sample, samples),
                                      start=1):
            counter += processed[0]
            rows += processed[1]
            bar.update(i)
        bar.update(num_samples)
        pool.close()
        pool.join()

        imported_samples = get_imported_samples(counter)
        assert counter["all"] == num_samples
        assert len(rows) == imported_samples
        print_import_report(counter, SAMPLE_RATE, MAX_SECS)

    output_csv = os.path.join(os.path.abspath(audio_dir), "train.csv")
    print("Saving new DeepSpeech-formatted CSV file to: ", output_csv)
    with open(output_csv, "w", encoding="utf-8",
              newline="") as output_csv_file:
        print("Writing CSV file for DeepSpeech.py as: ", output_csv)
        writer = csv.DictWriter(output_csv_file, fieldnames=FIELDNAMES)
        writer.writeheader()
        bar = progressbar.ProgressBar(max_value=len(rows), widgets=SIMPLE_BAR)
        for filename, file_size, transcript, speaker in bar(rows):
            # Single writerow path; only the transcript formatting differs.
            writer.writerow({
                "wav_filename": filename,
                "wav_filesize": file_size,
                "transcript": (" ".join(transcript)
                               if space_after_every_character else transcript),
            })
    return rows