def one_sample(sample):
    mp3_filename = sample[0]
    # Storing wav files next to the mp3 ones - just with a different suffix
    wav_filename = path.splitext(mp3_filename)[0] + ".wav"
    _maybe_convert_wav(mp3_filename, wav_filename)
    file_size = -1
    frames = 0
    if path.exists(wav_filename):
        file_size = path.getsize(wav_filename)
        frames = int(
            subprocess.check_output(['soxi', '-s', wav_filename],
                                    stderr=subprocess.STDOUT))
    label = validate_label(sample[1])
    rows = []
    counter = get_counter()
    if file_size == -1:
        # Excluding samples that failed upon conversion
        counter['failed'] += 1
    elif label is None:
        # Excluding samples that failed on label validation
        counter['invalid_label'] += 1
    elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)):
        # Excluding samples that are too short to fit the transcript
        counter['too_short'] += 1
    elif frames / SAMPLE_RATE > MAX_SECS:
        # Excluding very long samples to keep a reasonable batch-size
        counter['too_long'] += 1
    else:
        # This one is good - keep it for the target CSV
        rows.append((wav_filename, file_size, label))
    counter['all'] += 1
    counter['total_time'] += frames
    return (counter, rows)
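# The _maybe_convert_wav() helper used above is not shown in this section. A minimal
# sketch, assuming the pysox bindings and a module-level SAMPLE_RATE constant, could
# look like this (an illustration, not necessarily the importer's actual implementation):
import sox
from os import path

SAMPLE_RATE = 16000  # assumed target sample rate for DeepSpeech training data

def _maybe_convert_wav(mp3_filename, wav_filename):
    # Skip work if a previous run already produced the WAV file
    if not path.exists(wav_filename):
        transformer = sox.Transformer()
        transformer.convert(samplerate=SAMPLE_RATE)
        try:
            transformer.build(mp3_filename, wav_filename)
        except sox.core.SoxError:
            # Leave the WAV missing so one_sample() counts this sample as 'failed'
            pass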
def one_sample(sample):
    """ Take an audio file, and optionally convert it to 16kHz WAV """
    wav_filename = sample[0]
    file_size = -1
    frames = 0
    if path.exists(wav_filename):
        file_size = path.getsize(wav_filename)
        frames = int(
            subprocess.check_output(['soxi', '-s', wav_filename],
                                    stderr=subprocess.STDOUT))
    label = label_filter(sample[1])
    counter = get_counter()
    rows = []
    if file_size == -1:
        # Excluding samples that failed upon conversion
        print("conversion failure", wav_filename)
        counter['failed'] += 1
    elif label is None:
        # Excluding samples that failed on label validation
        counter['invalid_label'] += 1
    elif int(frames / SAMPLE_RATE * 1000 / 15 / 2) < len(str(label)):
        # Excluding samples that are too short to fit the transcript
        counter['too_short'] += 1
    elif frames / SAMPLE_RATE > MAX_SECS:
        # Excluding very long samples to keep a reasonable batch-size
        counter['too_long'] += 1
    else:
        # This one is good - keep it for the target CSV
        rows.append((wav_filename, file_size, label))
    counter['all'] += 1
    counter['total_time'] += frames
    return (counter, rows)
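# Every one_sample() variant above assumes get_counter() returns an additive mapping,
# so that per-worker results can be merged with '+=' in the Pool loops below. A
# plausible sketch using collections.Counter; the exact key set is inferred from the
# code in this section, not taken from the real helper:
from collections import Counter

def get_counter():
    # Counter instances support '+', which lets the import loops sum worker results
    return Counter({
        'all': 0,
        'failed': 0,
        'invalid_label': 0,
        'too_short': 0,
        'too_long': 0,
        'total_time': 0,
    })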
def _maybe_convert_set(input_tsv, audio_dir, space_after_every_character=None):
    output_csv = path.join(audio_dir,
                           os.path.split(input_tsv)[-1].replace('tsv', 'csv'))
    print("Saving new DeepSpeech-formatted CSV file to: ", output_csv)

    # Get audiofile path and transcript for each sentence in tsv
    samples = []
    with open(input_tsv, encoding='utf-8') as input_tsv_file:
        reader = csv.DictReader(input_tsv_file, delimiter='\t')
        for row in reader:
            samples.append((path.join(audio_dir, row['path']), row['sentence']))

    counter = get_counter()
    num_samples = len(samples)
    rows = []

    print("Importing mp3 files...")
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, processed in enumerate(pool.imap_unordered(one_sample, samples), start=1):
        counter += processed[0]
        rows += processed[1]
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(output_csv, 'w', encoding='utf-8') as output_csv_file:
        print('Writing CSV file for DeepSpeech.py as: ', output_csv)
        writer = csv.DictWriter(output_csv_file, fieldnames=FIELDNAMES)
        writer.writeheader()
        bar = progressbar.ProgressBar(max_value=len(rows), widgets=SIMPLE_BAR)
        for filename, file_size, transcript in bar(rows):
            if space_after_every_character:
                writer.writerow({
                    'wav_filename': filename,
                    'wav_filesize': file_size,
                    'transcript': ' '.join(transcript),
                })
            else:
                writer.writerow({
                    'wav_filename': filename,
                    'wav_filesize': file_size,
                    'transcript': transcript,
                })

    imported_samples = get_imported_samples(counter)
    assert counter['all'] == num_samples
    assert len(rows) == imported_samples

    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
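# The CSV writers and progress bars above rely on a handful of module-level constants
# that are not defined in this section. Reasonable definitions, assuming the
# progressbar2 package; the concrete values (e.g. MAX_SECS) are illustrative assumptions:
import progressbar

FIELDNAMES = ['wav_filename', 'wav_filesize', 'transcript']
SAMPLE_RATE = 16000   # expected audio sample rate in Hz
MAX_SECS = 10         # assumed upper bound on clip duration, in seconds
SIMPLE_BAR = ['Progress ', progressbar.Bar(), ' ', progressbar.Percentage(), ' completed']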
def _maybe_convert_set(extracted_dir, source_csv, target_csv):
    print()
    if path.exists(target_csv):
        print('Found CSV file "%s" - not importing "%s".' % (target_csv, source_csv))
        return
    print('No CSV file "%s" - importing "%s"...' % (target_csv, source_csv))

    samples = []
    with open(source_csv) as source_csv_file:
        reader = csv.DictReader(source_csv_file)
        for row in reader:
            samples.append((os.path.join(extracted_dir, row['filename']), row['text']))

    # Mutable counters for the concurrent embedded routine
    counter = get_counter()
    num_samples = len(samples)
    rows = []

    print('Importing mp3 files...')
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, processed in enumerate(pool.imap_unordered(one_sample, samples), start=1):
        counter += processed[0]
        rows += processed[1]
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    print('Writing "%s"...' % target_csv)
    with open(target_csv, 'w') as target_csv_file:
        writer = csv.DictWriter(target_csv_file, fieldnames=FIELDNAMES)
        writer.writeheader()
        bar = progressbar.ProgressBar(max_value=len(rows), widgets=SIMPLE_BAR)
        for filename, file_size, transcript in bar(rows):
            writer.writerow({
                'wav_filename': filename,
                'wav_filesize': file_size,
                'transcript': transcript,
            })

    imported_samples = get_imported_samples(counter)
    assert counter['all'] == num_samples
    assert len(rows) == imported_samples

    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
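# The assertions and the final report in the functions above depend on two helpers
# that are not shown here. A sketch consistent with the counter keys used in this
# section; the exact report wording is an assumption:
def get_imported_samples(counter):
    # Samples that survived every filter in one_sample()
    return (counter['all'] - counter['failed'] - counter['invalid_label']
            - counter['too_short'] - counter['too_long'])

def print_import_report(counter, sample_rate, max_secs):
    print('Imported %d samples.' % get_imported_samples(counter))
    if counter['failed'] > 0:
        print('Skipped %d samples that failed on conversion.' % counter['failed'])
    if counter['invalid_label'] > 0:
        print('Skipped %d samples that failed on label validation.' % counter['invalid_label'])
    if counter['too_short'] > 0:
        print('Skipped %d samples that were too short for their transcript.' % counter['too_short'])
    if counter['too_long'] > 0:
        print('Skipped %d samples longer than %d seconds.' % (counter['too_long'], max_secs))
    # total_time is accumulated in frames, so divide by the sample rate for seconds
    print('Total audio time: %.2f hours.' % (counter['total_time'] / sample_rate / 3600.0))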
def one_sample(sample):
    """ Take an audio file, and optionally convert it to 16kHz WAV """
    orig_filename = sample['path']
    # Storing wav files next to the original ones - just with a different suffix
    wav_filename = path.splitext(orig_filename)[0] + ".converted.wav"
    _maybe_convert_wav(orig_filename, wav_filename)
    file_size = -1
    frames = 0
    if path.exists(wav_filename):
        file_size = path.getsize(wav_filename)
        frames = int(
            subprocess.check_output(['soxi', '-s', wav_filename],
                                    stderr=subprocess.STDOUT))
    label = sample['text']
    rows = []

    # Keep track of how many samples are good vs. problematic
    counter = get_counter()
    if file_size == -1:
        # Excluding samples that failed upon conversion
        counter['failed'] += 1
    elif label is None:
        # Excluding samples that failed on label validation
        counter['invalid_label'] += 1
    elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)):
        # Excluding samples that are too short to fit the transcript
        counter['too_short'] += 1
    elif frames / SAMPLE_RATE > MAX_SECS:
        # Excluding very long samples to keep a reasonable batch-size
        counter['too_long'] += 1
    else:
        # This one is good - keep it for the target CSV
        rows.append((wav_filename, file_size, label))
    counter['all'] += 1
    counter['total_time'] += frames
    return (counter, rows)
def _maybe_convert_sets(target_dir, extracted_data, english_compatible=False):
    extracted_dir = path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(target_dir, 'ts_' + ARCHIVE_NAME + '_{}.csv')
    if os.path.isfile(target_csv_template):
        return

    path_to_original_csv = os.path.join(extracted_dir, 'data.csv')
    with open(path_to_original_csv) as csv_f:
        data = [
            d for d in csv.DictReader(csv_f, delimiter=',')
            if float(d['duration']) <= MAX_SECS
        ]

    for line in data:
        line['path'] = os.path.join(extracted_dir, line['path'])

    num_samples = len(data)
    rows = []
    counter = get_counter()

    print("Importing {} wav files...".format(num_samples))
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, processed in enumerate(pool.imap_unordered(one_sample, data), start=1):
        counter += processed[0]
        rows += processed[1]
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format('train'), 'w') as train_csv_file:  # 80%
        with open(target_csv_template.format('dev'), 'w') as dev_csv_file:  # 10%
            with open(target_csv_template.format('test'), 'w') as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
                test_writer.writeheader()

                for i, item in enumerate(rows):
                    transcript = validate_label(
                        cleanup_transcript(
                            item[2], english_compatible=english_compatible))
                    if not transcript:
                        continue
                    wav_filename = os.path.join(target_dir, extracted_data, item[0])
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(dict(
                        wav_filename=wav_filename,
                        wav_filesize=os.path.getsize(wav_filename),
                        transcript=transcript,
                    ))

    imported_samples = get_imported_samples(counter)
    assert counter['all'] == num_samples
    assert len(rows) == imported_samples

    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
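# The nested writers above implement a deterministic 80/10/10 train/dev/test split
# driven only by the row index. The same rule in isolation, as a small illustration:
def split_bucket(i):
    # i % 10 == 0 -> test, i % 10 == 1 -> dev, everything else -> train,
    # giving roughly 10% / 10% / 80% without shuffling the rows.
    i_mod = i % 10
    if i_mod == 0:
        return 'test'
    if i_mod == 1:
        return 'dev'
    return 'train'

assert [split_bucket(i) for i in range(4)] == ['test', 'dev', 'train', 'train']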
def _maybe_convert_sets(target_dir, extracted_data):
    extracted_dir = path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(
        target_dir, ARCHIVE_DIR_NAME + '_' + ARCHIVE_NAME.replace('.zip', '_{}.csv'))
    if os.path.isfile(target_csv_template):
        return

    ogg_root_dir = os.path.join(extracted_dir, ARCHIVE_NAME.replace('.zip', ''))

    # Get audiofile path and transcript for each sentence in tsv
    samples = []
    glob_dir = os.path.join(ogg_root_dir, '**/*.ogg')
    for record in glob(glob_dir, recursive=True):
        record_file = record.replace(ogg_root_dir + os.path.sep, '')
        if record_filter(record_file):
            samples.append((os.path.join(ogg_root_dir, record_file),
                            os.path.splitext(os.path.basename(record_file))[0]))

    counter = get_counter()
    num_samples = len(samples)
    rows = []

    print("Importing ogg files...")
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, processed in enumerate(pool.imap_unordered(one_sample, samples), start=1):
        counter += processed[0]
        rows += processed[1]
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format('train'), 'w') as train_csv_file:  # 80%
        with open(target_csv_template.format('dev'), 'w') as dev_csv_file:  # 10%
            with open(target_csv_template.format('test'), 'w') as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
                test_writer.writeheader()

                for i, item in enumerate(rows):
                    transcript = validate_label(item[2])
                    if not transcript:
                        continue
                    wav_filename = os.path.join(
                        ogg_root_dir, item[0].replace('.ogg', '.wav'))
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(dict(
                        wav_filename=wav_filename,
                        wav_filesize=os.path.getsize(wav_filename),
                        transcript=transcript,
                    ))

    imported_samples = get_imported_samples(counter)
    assert counter['all'] == num_samples
    assert len(rows) == imported_samples

    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
def _maybe_convert_sets(target_dir, extracted_data):
    extracted_dir = path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(target_dir, ARCHIVE_DIR_NAME,
                                       ARCHIVE_NAME.replace('.tgz', '_{}.csv'))
    if os.path.isfile(target_csv_template):
        return

    wav_root_dir = os.path.join(extracted_dir)

    # Get audiofile path and transcript for each sentence in tsv
    samples = []
    glob_dir = os.path.join(wav_root_dir, '**/metadata.csv')
    for record in glob(glob_dir, recursive=True):
        if any(map(lambda sk: sk in record, SKIP_LIST)):  # pylint: disable=cell-var-from-loop
            continue
        with open(record, 'r') as rec:
            for line in rec.readlines():
                fields = line.strip().split('|')
                audio = os.path.join(os.path.dirname(record), 'wavs', fields[0] + '.wav')
                transcript = fields[2]
                samples.append((audio, transcript))

    counter = get_counter()
    num_samples = len(samples)
    rows = []

    print("Importing WAV files...")
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, processed in enumerate(pool.imap_unordered(one_sample, samples), start=1):
        counter += processed[0]
        rows += processed[1]
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format('train'), 'w') as train_csv_file:  # 80%
        with open(target_csv_template.format('dev'), 'w') as dev_csv_file:  # 10%
            with open(target_csv_template.format('test'), 'w') as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
                test_writer.writeheader()

                for i, item in enumerate(rows):
                    transcript = validate_label(item[2])
                    if not transcript:
                        continue
                    wav_filename = item[0]
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(dict(
                        wav_filename=os.path.relpath(wav_filename, extracted_dir),
                        wav_filesize=os.path.getsize(wav_filename),
                        transcript=transcript,
                    ))

    imported_samples = get_imported_samples(counter)
    assert counter['all'] == num_samples
    assert len(rows) == imported_samples

    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
def _maybe_convert_sets(target_dir, extracted_data):
    extracted_dir = path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(
        target_dir, ARCHIVE_DIR_NAME, ARCHIVE_NAME.replace('.tar.gz', '_{}.csv'))
    if os.path.isfile(target_csv_template):
        return

    wav_root_dir = os.path.join(extracted_dir)

    all_files = [
        'transcripts/train/yaounde/fn_text.txt',
        'transcripts/train/ca16_conv/transcripts.txt',
        'transcripts/train/ca16_read/conditioned.txt',
        'transcripts/dev/niger_west_african_fr/transcripts.txt',
        'speech/dev/niger_west_african_fr/niger_wav_file_name_transcript.tsv',
        'transcripts/devtest/ca16_read/conditioned.txt',
        'transcripts/test/ca16/prompts.txt',
    ]

    transcripts = {}
    for tr in all_files:
        with open(os.path.join(target_dir, ARCHIVE_DIR_NAME, tr), 'r') as tr_source:
            for line in tr_source.readlines():
                line = line.strip()

                # The .tsv transcript file is tab-separated; the others use spaces
                if '.tsv' in tr:
                    sep = '\t'
                else:
                    sep = ' '

                audio = os.path.basename(line.split(sep)[0])

                if not ('.wav' in audio):
                    if '.tdf' in audio:
                        audio = audio.replace('.tdf', '.wav')
                    else:
                        audio += '.wav'

                transcript = ' '.join(line.split(sep)[1:])
                transcripts[audio] = transcript

    # Get audiofile path and transcript for each sentence in tsv
    samples = []
    glob_dir = os.path.join(wav_root_dir, '**/*.wav')
    for record in glob(glob_dir, recursive=True):
        record_file = os.path.basename(record)
        if record_file in transcripts:
            samples.append((record, transcripts[record_file]))

    # Keep track of how many samples are good vs. problematic
    counter = get_counter()
    num_samples = len(samples)
    rows = []

    print("Importing WAV files...")
    pool = Pool()
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, processed in enumerate(pool.imap_unordered(one_sample, samples), start=1):
        counter += processed[0]
        rows += processed[1]
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format('train'), 'w') as train_csv_file:  # 80%
        with open(target_csv_template.format('dev'), 'w') as dev_csv_file:  # 10%
            with open(target_csv_template.format('test'), 'w') as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
                test_writer.writeheader()

                for i, item in enumerate(rows):
                    transcript = validate_label(item[2])
                    if not transcript:
                        continue
                    wav_filename = item[0]
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(dict(
                        wav_filename=wav_filename,
                        wav_filesize=os.path.getsize(wav_filename),
                        transcript=transcript,
                    ))

    imported_samples = get_imported_samples(counter)
    assert counter['all'] == num_samples
    assert len(rows) == imported_samples

    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
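# These _maybe_convert_sets() variants are normally called from a small entry point
# that first downloads and extracts the corpus archive. A hedged sketch of such a
# driver; maybe_download(), _maybe_extract(), ARCHIVE_NAME, ARCHIVE_URL and
# ARCHIVE_DIR_NAME are assumptions about helpers and constants defined elsewhere
# in the importer, not shown in this section:
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description='Import a speech corpus into DeepSpeech CSV format')
    parser.add_argument('target_dir', help='Directory for the extracted data and generated CSV files')
    args = parser.parse_args()

    archive_path = maybe_download(ARCHIVE_NAME, args.target_dir, ARCHIVE_URL)  # assumed helper
    _maybe_extract(args.target_dir, ARCHIVE_DIR_NAME, archive_path)            # assumed helper
    _maybe_convert_sets(args.target_dir, ARCHIVE_DIR_NAME)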