Example #1
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("-csv",
                        "--csv-files",
                        help="Str. Filenames as a comma separated list",
                        required=True)
    parser.add_argument("--sample-rate",
                        type=int,
                        default=16000,
                        required=False,
                        help="Audio sample rate")
    parser.add_argument("--channels",
                        type=int,
                        default=1,
                        required=False,
                        help="Audio channels")
    parser.add_argument("--bits-per-sample",
                        type=int,
                        default=16,
                        required=False,
                        help="Audio bits per sample")
    args = parser.parse_args()
    in_files = [os.path.abspath(i) for i in args.csv_files.split(",")]

    csv_dataframe = read_csvs(in_files)
    total_bytes = csv_dataframe['wav_filesize'].sum()
    total_files = len(csv_dataframe.index)

    bytes_without_headers = total_bytes - 44 * total_files

    total_time = bytes_without_headers / (args.sample_rate * args.channels *
                                          args.bits_per_sample / 8)

    print('total_bytes', total_bytes)
    print('total_files', total_files)
    print('bytes_without_headers', bytes_without_headers)
    print('total_time', secs_to_hours(total_time))
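
Example #1 relies on two helpers that are not part of the snippet. A minimal sketch of what they might look like, assuming read_csvs concatenates the listed CSVs into a single pandas DataFrame with the usual DeepSpeech columns and secs_to_hours formats a number of seconds for printing (both bodies are reconstructions, not the original implementations):

import pandas

def read_csvs(csv_files):
    # Hypothetical reconstruction: load each CSV and concatenate them into one
    # DataFrame, so that ['wav_filesize'].sum() and len(df.index) work as above.
    frames = [pandas.read_csv(csv_file) for csv_file in csv_files]
    return pandas.concat(frames, ignore_index=True)

def secs_to_hours(secs):
    # Hypothetical reconstruction: render a duration in seconds as H:MM:SS.
    hours, remainder = divmod(int(secs), 3600)
    minutes, seconds = divmod(remainder, 60)
    return '%d:%02d:%02d' % (hours, minutes, seconds)

With the defaults above (16000 Hz, 1 channel, 16 bits per sample), each second of audio occupies 32000 payload bytes, which is why the canonical 44-byte WAV header is subtracted per file before dividing.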
Example #2
def _maybe_convert_set(input_tsv,
                       audio_dir,
                       label_filter,
                       space_after_every_character=None):
    output_csv = path.join(audio_dir,
                           os.path.split(input_tsv)[-1].replace('.tsv', '.csv'))
    print("Saving new DeepSpeech-formatted CSV file to: ", output_csv)

    # Get audiofile path and transcript for each sentence in tsv
    samples = []
    with open(input_tsv, encoding='utf-8') as input_tsv_file:
        reader = csv.DictReader(input_tsv_file, delimiter='\t')
        for row in reader:
            samples.append((row['path'], row['sentence']))

    # Keep track of how many samples are good vs. problematic
    counter = {
        'all': 0,
        'failed': 0,
        'invalid_label': 0,
        'too_short': 0,
        'too_long': 0,
        'total_time': 0
    }
    lock = RLock()
    num_samples = len(samples)
    rows = []

    def one_sample(sample):
        """ Take a audio file, and optionally convert it to 16kHz WAV """
        mp3_filename = path.join(audio_dir, sample[0])
        if not path.splitext(mp3_filename.lower())[1] == '.mp3':
            mp3_filename += ".mp3"
        # Storing wav files next to the mp3 ones - just with a different suffix
        wav_filename = path.splitext(mp3_filename)[0] + ".wav"
        _maybe_convert_wav(mp3_filename, wav_filename)
        file_size = -1
        frames = 0
        if path.exists(wav_filename):
            file_size = path.getsize(wav_filename)
            frames = int(
                subprocess.check_output(['soxi', '-s', wav_filename],
                                        stderr=subprocess.STDOUT))
        label = label_filter(sample[1])
        with lock:
            if file_size == -1:
                # Excluding samples that failed upon conversion
                counter['failed'] += 1
            elif label is None:
                # Excluding samples that failed on label validation
                counter['invalid_label'] += 1
            elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)):
                # Excluding samples that are too short to fit the transcript
                counter['too_short'] += 1
            elif frames / SAMPLE_RATE > MAX_SECS:
                # Excluding very long samples to keep a reasonable batch-size
                counter['too_long'] += 1
            else:
                # This one is good - keep it for the target CSV
                rows.append(
                    (os.path.split(wav_filename)[-1], file_size, label))
            counter['all'] += 1
            counter['total_time'] += frames

    print("Importing mp3 files...")
    pool = Pool(cpu_count())
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=1):
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(output_csv, 'w', encoding='utf-8') as output_csv_file:
        print('Writing CSV file for DeepSpeech.py as: ', output_csv)
        writer = csv.DictWriter(output_csv_file, fieldnames=FIELDNAMES)
        writer.writeheader()
        bar = progressbar.ProgressBar(max_value=len(rows), widgets=SIMPLE_BAR)
        for filename, file_size, transcript in bar(rows):
            if space_after_every_character:
                writer.writerow({
                    'wav_filename': filename,
                    'wav_filesize': file_size,
                    'transcript': ' '.join(transcript)
                })
            else:
                writer.writerow({
                    'wav_filename': filename,
                    'wav_filesize': file_size,
                    'transcript': transcript
                })

    print('Imported %d samples.' %
          (counter['all'] - counter['failed'] - counter['too_short'] -
           counter['too_long']))
    if counter['failed'] > 0:
        print('Skipped %d samples that failed upon conversion.' %
              counter['failed'])
    if counter['invalid_label'] > 0:
        print('Skipped %d samples that failed on transcript validation.' %
              counter['invalid_label'])
    if counter['too_short'] > 0:
        print(
            'Skipped %d samples that were too short to match the transcript.' %
            counter['too_short'])
    if counter['too_long'] > 0:
        print('Skipped %d samples that were longer than %d seconds.' %
              (counter['too_long'], MAX_SECS))
    print('Final amount of imported audio: %s.' %
          secs_to_hours(counter['total_time'] / SAMPLE_RATE))
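
Example #2 calls _maybe_convert_wav, which is not shown in the snippet. A minimal sketch under the assumption that it shells out to the sox command-line tool, which the code already depends on via soxi, and that the sox build has MP3 support; this is a reconstruction, not the original helper:

import os
import subprocess

def _maybe_convert_wav(mp3_filename, wav_filename):
    # Hypothetical reconstruction: do nothing if the WAV already exists,
    # otherwise transcode to 16 kHz, mono, 16-bit PCM with the sox CLI.
    if os.path.exists(wav_filename):
        return
    try:
        subprocess.check_call(['sox', mp3_filename,
                               '-r', '16000', '-c', '1', '-b', '16',
                               wav_filename])
    except subprocess.CalledProcessError:
        # Leave no partial WAV behind; the caller treats a missing
        # output file as a failed sample (file_size stays -1).
        if os.path.exists(wav_filename):
            os.remove(wav_filename)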
Example #3
def _maybe_convert_sets(target_dir, extracted_data):
    extracted_dir = path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(target_dir, ARCHIVE_DIR_NAME, ARCHIVE_NAME.replace('.tgz', '_{}.csv'))
    if os.path.isfile(target_csv_template):
        return

    wav_root_dir = extracted_dir

    # Get audiofile path and transcript for each sentence in tsv
    samples = []
    glob_dir = os.path.join(wav_root_dir, '**/metadata.csv')
    for record in glob(glob_dir, recursive=True):
        if any(sk in record for sk in SKIP_LIST):
            continue
        with open(record, 'r') as rec:
            for line in rec:
                fields = line.strip().split('|')
                audio = os.path.join(os.path.dirname(record), 'wavs', fields[0] + '.wav')
                transcript = fields[2]
                samples.append((audio, transcript))

    # Keep track of how many samples are good vs. problematic
    counter = {'all': 0, 'failed': 0, 'invalid_label': 0, 'too_short': 0, 'too_long': 0, 'total_time': 0}
    lock = RLock()
    num_samples = len(samples)
    rows = []

    def one_sample(sample):
        """ Take a audio file, and optionally convert it to 16kHz WAV """
        wav_filename = sample[0]
        file_size = -1
        frames = 0
        if path.exists(wav_filename):
            file_size = path.getsize(wav_filename)
            frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
        label = label_filter(sample[1])
        with lock:
            if file_size == -1:
                # Excluding samples that failed upon conversion
                counter['failed'] += 1
            elif label is None:
                # Excluding samples that failed on label validation
                counter['invalid_label'] += 1
            elif int(frames/SAMPLE_RATE*1000/15/2) < len(str(label)):
                # Excluding samples that are too short to fit the transcript
                counter['too_short'] += 1
            elif frames/SAMPLE_RATE > MAX_SECS:
                # Excluding very long samples to keep a reasonable batch-size
                counter['too_long'] += 1
            else:
                # This one is good - keep it for the target CSV
                rows.append((wav_filename, file_size, label))
            counter['all'] += 1
            counter['total_time'] += frames

    print("Importing WAV files...")
    pool = Pool(cpu_count())
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=1):
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format('train'), 'w') as train_csv_file:  # 80%
        with open(target_csv_template.format('dev'), 'w') as dev_csv_file:  # 10%
            with open(target_csv_template.format('test'), 'w') as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
                test_writer.writeheader()

                for i, item in enumerate(rows):
                    transcript = validate_label(item[2])
                    if not transcript:
                        continue
                    wav_filename = item[0]
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(dict(
                        wav_filename=wav_filename,
                        wav_filesize=os.path.getsize(wav_filename),
                        transcript=transcript,
                    ))

    print('Imported %d samples.' % (counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long']))
    if counter['failed'] > 0:
        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
    if counter['invalid_label'] > 0:
        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
    if counter['too_short'] > 0:
        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
    if counter['too_long'] > 0:
        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
    print('Final amount of imported audio: %s.' % secs_to_hours(counter['total_time'] / SAMPLE_RATE))
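
The train/dev/test split in Example #3 is deterministic rather than random: rows are dealt out round-robin by index, so roughly 10% land in test (i % 10 == 0), 10% in dev (i % 10 == 1) and the remaining 80% in train. A small stand-alone sketch of that assignment (assign_split is a hypothetical name, not part of the importer):

def assign_split(index):
    # Round-robin bucket assignment matching the writer selection above.
    i_mod = index % 10
    if i_mod == 0:
        return 'test'
    if i_mod == 1:
        return 'dev'
    return 'train'

# Out of 100 consecutive rows, 10 go to test, 10 to dev and 80 to train.
counts = {'train': 0, 'dev': 0, 'test': 0}
for i in range(100):
    counts[assign_split(i)] += 1
assert counts == {'train': 80, 'dev': 10, 'test': 10}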
Example #4
def _maybe_convert_set(_dir):
    input_tsv = _clean_text(join(_dir, 'utt_spk_text.tsv'))

    samples = []
    with open(input_tsv) as input_tsv_file:
        reader = csv.DictReader(input_tsv_file,
                                delimiter='\t',
                                fieldnames=['path', 'id', 'sentence'])
        for row in reader:
            samples.append((row['path'], row['sentence']))

    counter = {
        'all': 0,
        'failed': 0,
        'invalid_label': 0,
        'too_short': 0,
        'too_long': 0,
        'total_time': 0
    }
    lock = RLock()
    num_samples = len(samples)
    rows = []

    def one_sample(sample):
        _path = sample[0]
        flac_filename = os.path.abspath(
            path.join(_dir, 'data', _path[:2], _path + '.flac'))
        wav_file = join(_path[:2], _path + ".wav")
        wav_filename = join(STORE, wav_file)
        _maybe_convert_wav(flac_filename, wav_filename)
        _file_size = -1
        frames = 0

        if path.exists(wav_filename):
            _file_size = path.getsize(wav_filename)
            frames = int(
                subprocess.check_output(['soxi', '-s', wav_filename],
                                        stderr=subprocess.STDOUT))
        label = label_filter_fun(sample[1])
        with lock:
            if _file_size == -1:
                # Excluding samples that failed upon conversion
                counter['failed'] += 1
            elif label is None:
                # Excluding samples that failed on label validation
                counter['invalid_label'] += 1
            elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)):
                # Excluding samples that are too short to fit the transcript
                counter['too_short'] += 1
            elif frames / SAMPLE_RATE > MAX_SECS:
                # Excluding very long samples to keep a reasonable batch-size
                counter['too_long'] += 1
            else:
                # This one is good - keep it for the target CSV
                rows.append((wav_file, _file_size, label))
            counter['all'] += 1
            counter['total_time'] += frames

    print("Importing mp3 files...")
    pool = Pool(cpu_count())
    bar = progressbar.ProgressBar(maxval=num_samples, widgets=SIMPLE_BAR)
    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=0):
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(OUTPUT_CSV, 'w') as output_csv_file:
        print('Writing CSV file for DeepSpeech.py as: ', OUTPUT_CSV)
        writer = csv.DictWriter(output_csv_file, fieldnames=FIELDNAMES)
        writer.writeheader()
        bar = progressbar.ProgressBar(max_value=len(rows), widgets=SIMPLE_BAR)
        for filename, file_size, transcript in bar(rows):
            writer.writerow({
                'wav_filename': filename,
                'wav_filesize': file_size,
                'transcript': transcript
            })

    print('Imported %d samples.' %
          (counter['all'] - counter['failed'] - counter['too_short'] -
           counter['too_long']))
    if counter['failed'] > 0:
        print('Skipped %d samples that failed upon conversion.' %
              counter['failed'])
    if counter['invalid_label'] > 0:
        print('Skipped %d samples that failed on transcript validation.' %
              counter['invalid_label'])
    if counter['too_short'] > 0:
        print(
            'Skipped %d samples that were too short to match the transcript.' %
            counter['too_short'])
    if counter['too_long'] > 0:
        print('Skipped %d samples that were longer than %d seconds.' %
              (counter['too_long'], MAX_SECS))
    print('Final amount of imported audio: %s.' %
          secs_to_hours(counter['total_time'] / SAMPLE_RATE))
Example #5
def _maybe_convert_sets(target_dir, extracted_data, english_compatible=False):
    extracted_dir = path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(target_dir, 'ts_' + ARCHIVE_NAME + '_{}.csv')
    if os.path.isfile(target_csv_template):
        return
    path_to_original_csv = os.path.join(extracted_dir, 'data.csv')
    with open(path_to_original_csv) as csv_f:
        data = [
            d for d in csv.DictReader(csv_f, delimiter=',')
            if float(d['duration']) <= MAX_SECS
        ]

    # Keep track of how many samples are good vs. problematic
    counter = {'all': 0, 'failed': 0, 'invalid_label': 0, 'too_short': 0, 'too_long': 0, 'total_time': 0}
    lock = RLock()
    num_samples = len(data)
    rows = []

    wav_root_dir = extracted_dir

    def one_sample(sample):
        """ Take a audio file, and optionally convert it to 16kHz WAV """
        orig_filename = path.join(wav_root_dir, sample['path'])
        # Storing converted wav files next to the originals - just with a different suffix
        wav_filename = path.splitext(orig_filename)[0] + ".converted.wav"
        _maybe_convert_wav(orig_filename, wav_filename)
        file_size = -1
        frames = 0
        if path.exists(wav_filename):
            file_size = path.getsize(wav_filename)
            frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
        label = sample['text']
        with lock:
            if file_size == -1:
                # Excluding samples that failed upon conversion
                counter['failed'] += 1
            elif label is None:
                # Excluding samples that failed on label validation
                counter['invalid_label'] += 1
            elif int(frames/SAMPLE_RATE*1000/10/2) < len(str(label)):
                # Excluding samples that are too short to fit the transcript
                counter['too_short'] += 1
            elif frames/SAMPLE_RATE > MAX_SECS:
                # Excluding very long samples to keep a reasonable batch-size
                counter['too_long'] += 1
            else:
                # This one is good - keep it for the target CSV
                rows.append((wav_filename, file_size, label))
            counter['all'] += 1
            counter['total_time'] += frames

    print("Importing wav files...")
    pool = Pool(cpu_count())
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, _ in enumerate(pool.imap_unordered(one_sample, data), start=1):
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format('train'), 'w') as train_csv_file:  # 80%
        with open(target_csv_template.format('dev'), 'w') as dev_csv_file:  # 10%
            with open(target_csv_template.format('test'), 'w') as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
                test_writer.writeheader()

                for i, item in enumerate(rows):
                    transcript = validate_label(cleanup_transcript(item[2], english_compatible=english_compatible))
                    if not transcript:
                        continue
                    wav_filename = os.path.join(target_dir, extracted_data, item[0])
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(dict(
                        wav_filename=wav_filename,
                        wav_filesize=os.path.getsize(wav_filename),
                        transcript=transcript,
                    ))

    print('Imported %d samples.' % (counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long']))
    if counter['failed'] > 0:
        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
    if counter['invalid_label'] > 0:
        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
    if counter['too_short'] > 0:
        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
    if counter['too_long'] > 0:
        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
    print('Final amount of imported audio: %s.' % secs_to_hours(counter['total_time'] / SAMPLE_RATE))
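
The importers above share the same plausibility check for dropping clips that are too short for their transcript: at the 16 kHz sample rate assumed throughout, the audio must offer at least one 20 ms slot per transcript character (Example #3 divides by 15 instead of 10, i.e. one 30 ms slot per character). A worked check with hypothetical numbers:

SAMPLE_RATE = 16000  # assumed, matching the --sample-rate default in Example #1

def too_short(frames, label):
    # Same heuristic as in one_sample above: reject the sample if the audio
    # provides fewer 20 ms slots than the transcript has characters.
    return int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label))

# A 2-second clip (32000 frames) yields 100 slots, so a 100-character
# transcript still fits while a 101-character one is flagged as too short.
assert not too_short(32000, 'x' * 100)
assert too_short(32000, 'x' * 101)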