def label_filter(label, language):
    label = label.translate(PRE_FILTER)
    label = validate_label(label)
    if label is None:
        return None, 'validation'
    substitutions = SUBSTITUTIONS[language] if language in SUBSTITUTIONS else []
    for pattern, replacement in substitutions:
        if replacement is None:
            if pattern.match(label):
                return None, 'substitution rule'
        else:
            label = pattern.sub(replacement, label)
    chars = []
    dont_normalize = DONT_NORMALIZE[language] if language in DONT_NORMALIZE else ''
    alphabet = get_alphabet(language)
    for c in label:
        if CLI_ARGS.normalize and c not in dont_normalize and not in_alphabet(alphabet, c):
            c = unicodedata.normalize("NFKD", c).encode("ascii", "ignore").decode("ascii", "ignore")
        for sc in c:
            if not in_alphabet(alphabet, sc):
                return None, 'illegal character'
            chars.append(sc)
    label = ''.join(chars)
    label = validate_label(label)
    # The second element is the rejection reason; it is None when the label is kept
    return label, 'validation' if label is None else None
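# Hedged usage sketch (not part of the importer above): how a substitutions
# table like SUBSTITUTIONS is typically shaped. A rule whose replacement is
# None rejects the label outright; otherwise the match is rewritten in place.
# The importer uses pattern.match(); search() is used here so the example rule
# also fires mid-string. All names and rules below are illustrative.
import re

EXAMPLE_SUBSTITUTIONS = {
    'de': [
        (re.compile(r'\d'), None),    # labels containing digits are dropped
        (re.compile(r'\s+'), ' '),    # runs of whitespace are collapsed
    ],
}

def apply_example_substitutions(label, language):
    for pattern, replacement in EXAMPLE_SUBSTITUTIONS.get(language, []):
        if replacement is None:
            if pattern.search(label):
                return None
        else:
            label = pattern.sub(replacement, label)
    return label

assert apply_example_substitutions('hallo   welt', 'de') == 'hallo welt'
assert apply_example_substitutions('gleis 9', 'de') is None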
def one_sample(sample):
    mp3_filename = path.join(*(sample[0].split('/')))
    mp3_filename = path.join(extracted_dir, mp3_filename)
    # Storing wav files next to the mp3 ones - just with a different suffix
    wav_filename = path.splitext(mp3_filename)[0] + ".wav"
    _maybe_convert_wav(mp3_filename, wav_filename)
    file_size = -1
    frames = 0
    # Only query soxi once the converted file actually exists; an unconditional
    # call would raise for samples that failed conversion
    if path.exists(wav_filename):
        file_size = path.getsize(wav_filename)
        frames = int(subprocess.check_output(['soxi', '-s', wav_filename],
                                             stderr=subprocess.STDOUT))
    label = validate_label(sample[1])
    with lock:
        if file_size == -1:
            # Excluding samples that failed upon conversion
            counter['failed'] += 1
        elif label is None:
            # Excluding samples that failed on label validation
            counter['invalid_label'] += 1
        elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)):
            # Excluding samples that are too short to fit the transcript
            counter['too_short'] += 1
        elif frames / SAMPLE_RATE > MAX_SECS:
            # Excluding very long samples to keep a reasonable batch-size
            counter['too_long'] += 1
        else:
            # This one is good - keep it for the target CSV
            rows.append((wav_filename, file_size, label))
        counter['all'] += 1
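# Hedged sketch of the length heuristic used above, with illustrative numbers.
# frames / SAMPLE_RATE is the clip duration in seconds; dividing the duration
# in milliseconds by 10 gives the number of 10 ms feature windows, and the
# check assumes at least two windows per transcript character.
SAMPLE_RATE_EXAMPLE = 16000

def fits_transcript(frames, transcript):
    windows = int(frames / SAMPLE_RATE_EXAMPLE * 1000 / 10 / 2)
    return windows >= len(transcript)

# A 1-second clip yields 50 usable windows, enough for a 50-character transcript:
assert fits_transcript(16000, 'a' * 50)
assert not fits_transcript(16000, 'a' * 51)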
def _parse_transcriptions(trans_file):
    segments = []
    with codecs.open(trans_file, "r", "utf-8") as fin:
        for line in fin:
            if line.startswith("#") or len(line) <= 1:
                continue
            tokens = line.split()
            start_time = float(tokens[1])
            stop_time = float(tokens[2])
            transcript = validate_label(" ".join(tokens[3:]))
            if transcript is None:
                continue
            # We need to do the encode-decode dance here because encode
            # returns a bytes() object on Python 3, and text_to_char_array
            # expects a string.
            transcript = unicodedata.normalize("NFKD", transcript) \
                .encode("ascii", "ignore") \
                .decode("ascii", "ignore")
            segments.append({
                "start_time": start_time,
                "stop_time": stop_time,
                "transcript": transcript,
            })
    return segments
def _parse_transcriptions(trans_file):
    segments = []
    with open(trans_file, "r") as fin:
        for line in fin:
            if line.startswith("#") or len(line) <= 1:
                continue
            # Fields are space-separated: filename, start time, stop time, transcript
            filename_time_beg = 0
            filename_time_end = line.find(" ", filename_time_beg)
            start_time_beg = filename_time_end + 1
            start_time_end = line.find(" ", start_time_beg)
            stop_time_beg = start_time_end + 1
            stop_time_end = line.find(" ", stop_time_beg)
            transcript_beg = stop_time_end + 1
            transcript_end = len(line)
            if validate_label(line[transcript_beg:transcript_end].strip()) is None:
                continue
            segments.append({
                "start_time": float(line[start_time_beg:start_time_end]),
                "stop_time": float(line[stop_time_beg:stop_time_end]),
                "speaker": line[6],
                "transcript": line[transcript_beg:transcript_end].strip().lower(),
            })
    return segments
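# Illustrative check of the field slicing above on a made-up transcript line
# (format assumed: "<filename> <start> <stop> <text>"); not a real dataset line.
line = "sw02001A 12.34 15.67 hello world\n"
filename_end = line.find(" ")
start_end = line.find(" ", filename_end + 1)
stop_end = line.find(" ", start_end + 1)
assert float(line[filename_end + 1:start_end]) == 12.34
assert float(line[start_end + 1:stop_end]) == 15.67
assert line[stop_end + 1:].strip() == "hello world"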
def _split_wav_and_sentences(data_dir, trans_data, original_data, converted_data):
    trans_dir = os.path.join(data_dir, trans_data)
    source_dir = os.path.join(data_dir, original_data)
    target_dir = os.path.join(data_dir, converted_data)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    files = []
    # Loop over transcription files and split corresponding wav
    for root, dirnames, filenames in os.walk(trans_dir):
        for filename in fnmatch.filter(filenames, "*.txt"):
            trans_file = os.path.join(root, filename)
            segments = _parse_transcriptions(trans_file)
            # Open wav corresponding to transcription file
            wav_filenames = [os.path.splitext(os.path.basename(trans_file))[0]
                             + "_c" + channel + ".wav" for channel in ["1", "2"]]
            wav_files = [os.path.join(source_dir, wav_filename)
                         for wav_filename in wav_filenames]
            print("splitting {} according to {}".format(wav_files, trans_file))
            origAudios = [wave.open(wav_file, "r") for wav_file in wav_files]
            # Loop over segments and split wav_file for each segment
            for segment in segments:
                # Create wav segment filename
                start_time = segment["start_time"]
                stop_time = segment["stop_time"]
                new_wav_filename = (os.path.splitext(os.path.basename(trans_file))[0]
                                    + "-" + str(start_time) + "-" + str(stop_time) + ".wav")
                new_wav_file = os.path.join(target_dir, new_wav_filename)
                channel = 0 if segment["speaker"] == "A:" else 1
                _split_and_resample_wav(origAudios[channel], start_time, stop_time, new_wav_file)
                new_wav_filesize = os.path.getsize(new_wav_file)
                transcript = validate_label(segment["transcript"])
                if transcript is not None:
                    files.append((os.path.abspath(new_wav_file), new_wav_filesize, transcript))
            # Close origAudios
            for origAudio in origAudios:
                origAudio.close()
    return pandas.DataFrame(data=files, columns=["wav_filename", "wav_filesize", "transcript"])
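# Hypothetical sketch of what a helper like _split_and_resample_wav might do
# with the standard wave module; the real implementation (including the
# resampling step) is not shown in this section, so this is an assumption.
import wave

def example_split_wav(orig_audio, start_time, stop_time, new_wav_file):
    frame_rate = orig_audio.getframerate()
    # Seek to the segment start and read exactly the frames it covers
    orig_audio.setpos(int(start_time * frame_rate))
    chunk = orig_audio.readframes(int((stop_time - start_time) * frame_rate))
    with wave.open(new_wav_file, "w") as out:
        out.setparams(orig_audio.getparams())
        out.writeframes(chunk)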
def _convert_csv_data_to_raw_data_impl(self, audio_url, transcript, audio_length):
    # The CSV header row is passed through like data; map it to the output header
    if audio_url == "audio_url":
        return pd.Series(["wav_filename", "wav_filesize", "transcript"])
    mp3_filename = os.path.basename(audio_url)
    wav_relative_filename = path.join("wav", os.path.splitext(os.path.basename(mp3_filename))[0] + ".wav")
    wav_filesize = path.getsize(path.join(self.target_dir, wav_relative_filename))
    transcript = validate_label(transcript)
    if transcript is None:
        transcript = ""
    return pd.Series([wav_relative_filename, wav_filesize, transcript])
def label_filter(label):
    if params.normalize:
        label = unicodedata.normalize("NFKD", label.strip()) \
            .encode("ascii", "ignore") \
            .decode("ascii", "ignore")
    label = validate_label(label)
    if alphabet and label:
        try:
            for c in label:
                alphabet.label_from_string(c)
        except KeyError:
            label = None
    return label
def label_filter(label):
    if CLI_ARGS.normalize:
        label = unicodedata.normalize("NFKD", label.strip()) \
            .encode("ascii", "ignore") \
            .decode("ascii", "ignore")
    label = validate_label(label)
    if ALPHABET and label:
        try:
            for c in label:
                ALPHABET.label_from_string(c)
        except KeyError:
            label = None
    return label
def label_filter_fun(label):
    if PARAMS.normalize:
        label = unicodedata.normalize("NFKD", label.strip()) \
            .encode("ascii", "ignore") \
            .decode("ascii", "ignore")
    label = validate_label(label)
    if ALPHABET and label:
        try:
            ALPHABET.encode(label)
        except KeyError:
            label = None
    return label
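# Minimal stand-in (an assumption, not the real Alphabet class) illustrating
# the contract the filters above rely on: encoding raises KeyError as soon as
# a character is outside the alphabet, which the caller turns into label = None.
class ExampleAlphabet:
    def __init__(self, chars):
        self._char_to_id = {c: i for i, c in enumerate(chars)}

    def encode(self, text):
        return [self._char_to_id[c] for c in text]

example = ExampleAlphabet("abcdefghijklmnopqrstuvwxyz' ")
assert example.encode("ok") == [14, 10]
try:
    example.encode("héllo")
except KeyError:
    pass  # the sample would be dropped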
def _maybe_split_transcriptions(data_dir, original_data):
    source_dir = os.path.join(data_dir, original_data)
    wav_dirs = ["swb1_d1-split-wav", "swb1_d2-split-wav",
                "swb1_d3-split-wav", "swb1_d4-split-wav"]
    if os.path.exists(os.path.join(source_dir, "split_transcriptions_done")):
        print("skipping maybe_split_transcriptions")
        return
    # Loop over transcription files and split them into individual files for
    # each utterance
    for root, dirnames, filenames in os.walk(source_dir):
        for filename in fnmatch.filter(filenames, "*.text"):
            if "trans" not in filename:
                continue
            trans_file = os.path.join(root, filename)
            segments = _parse_transcriptions(trans_file)
            # Loop over segments and split wav_file for each segment
            for segment in segments:
                start_time = segment["start_time"]
                stop_time = segment["stop_time"]
                txt_filename = (os.path.splitext(os.path.basename(trans_file))[0]
                                + "-" + str(start_time) + "-" + str(stop_time) + ".txt")
                wav_filename = (os.path.splitext(os.path.basename(trans_file))[0]
                                + "-" + str(start_time) + "-" + str(stop_time) + ".wav")
                transcript = validate_label(segment["transcript"])
                for wav_dir in wav_dirs:
                    if os.path.exists(os.path.join(data_dir, wav_dir, wav_filename)):
                        # If the transcript is valid and the txt segment filename
                        # does not exist, create it
                        txt_file = os.path.join(data_dir, wav_dir, txt_filename)
                        if transcript is not None and not os.path.exists(txt_file):
                            with open(txt_file, "w") as fout:
                                fout.write(transcript)
                        break
    with open(os.path.join(source_dir, "split_transcriptions_done"), "w") as fout:
        fout.write("This file signals to the importer that the transcription of this source dir has already been completed.")
def _maybe_convert_sets(target_dir, extracted_data, english_compatible=False):
    extracted_dir = path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(target_dir, 'ts_' + ARCHIVE_NAME + '_{}.csv')
    # the template still contains '{}', so test a concrete filename
    if os.path.isfile(target_csv_template.format('train')):
        return
    path_to_original_csv = os.path.join(extracted_dir, 'data.csv')
    with open(path_to_original_csv) as csv_f:
        data = [d for d in csv.DictReader(csv_f, delimiter=',')
                if float(d['duration']) <= MAX_SECS]
    with open(target_csv_template.format('train'), 'w') as train_csv_file:  # 80%
        with open(target_csv_template.format('dev'), 'w') as dev_csv_file:  # 10%
            with open(target_csv_template.format('test'), 'w') as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
                test_writer.writeheader()
                for i, item in enumerate(data):
                    transcript = validate_label(
                        cleanup_transcript(item['text'], english_compatible=english_compatible))
                    if not transcript:
                        continue
                    wav_filename = os.path.join(target_dir, extracted_data, item['path'])
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(dict(
                        wav_filename=wav_filename,
                        wav_filesize=os.path.getsize(wav_filename),
                        transcript=transcript,
                    ))
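# Sketch of the deterministic 80/10/10 split used above: every 10th row goes
# to test, every 10th-plus-one to dev, the rest to train. Illustrative only.
def split_bucket(i):
    i_mod = i % 10
    if i_mod == 0:
        return 'test'
    if i_mod == 1:
        return 'dev'
    return 'train'

buckets = [split_bucket(i) for i in range(100)]
assert buckets.count('train') == 80
assert buckets.count('dev') == 10
assert buckets.count('test') == 10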
def check_and_prepare_sentence(sentence):
    sentence = sentence.lower().replace('co2', 'c o zwei')
    chars = []
    for c in sentence:
        if CLI_ARGS.normalize and c not in 'äöüß' and (ALPHABET is None or not ALPHABET.has_char(c)):
            c = unicodedata.normalize("NFKD", c).encode("ascii", "ignore").decode("ascii", "ignore")
        for sc in c:
            # Check the decomposed character, not the original one
            if ALPHABET is not None and not ALPHABET.has_char(sc):
                return None
            chars.append(sc)
    return validate_label(''.join(chars))
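# Hedged illustration of the NFKD fallback above: characters outside the
# alphabet are decomposed and stripped to their ASCII base, while 'äöüß' are
# kept verbatim.
import unicodedata
assert unicodedata.normalize("NFKD", "é").encode("ascii", "ignore").decode("ascii") == "e"
# 'ß' has no ASCII decomposition and would vanish, which is why it is exempted:
assert unicodedata.normalize("NFKD", "ß").encode("ascii", "ignore").decode("ascii") == ""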
def _maybe_split_transcriptions(data_dir, original_data, converted_data):
    source_dir = os.path.join(data_dir, original_data)
    target_dir = os.path.join(data_dir, converted_data)
    if os.path.exists(os.path.join(source_dir, "split_transcriptions_done")):
        print("skipping maybe_split_transcriptions")
        return
    # Loop over transcription files and split them into individual files for
    # each utterance
    for root, dirnames, filenames in os.walk(source_dir):
        for filename in fnmatch.filter(filenames, "*.txt"):
            trans_file = os.path.join(root, filename)
            segments = _parse_transcriptions(trans_file)
            # Loop over segments and split wav_file for each segment
            for segment in segments:
                start_time = segment["start_time"]
                stop_time = segment["stop_time"]
                txt_filename = (os.path.splitext(os.path.basename(trans_file))[0]
                                + "-" + str(start_time) + "-" + str(stop_time) + ".txt")
                txt_file = os.path.join(target_dir, txt_filename)
                transcript = validate_label(segment["transcript"])
                # If the transcript is valid, write it to the segment file
                if transcript is not None:
                    with open(txt_file, "w") as fout:
                        fout.write(transcript)
    with open(os.path.join(source_dir, "split_transcriptions_done"), "w") as fout:
        fout.write("This file signals to the importer that the transcription of this source dir has already been completed.")
def label_filter_fun(label):
    label = unidecode(label)
    label = validate_label(label)
    return label
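# Quick illustration (assuming the third-party unidecode package) of the
# transliteration step above: accented and non-Latin characters are mapped to
# ASCII approximations before validation.
from unidecode import unidecode
assert unidecode("Ça marche déjà") == "Ca marche deja"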
def _maybe_convert_sets(target_dir, extracted_data):
    extracted_dir = path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(target_dir, ARCHIVE_DIR_NAME + '_' + ARCHIVE_NAME.replace('.zip', '_{}.csv'))
    # the template still contains '{}', so test a concrete filename
    if os.path.isfile(target_csv_template.format('train')):
        return
    ogg_root_dir = os.path.join(extracted_dir, ARCHIVE_NAME.replace('.zip', ''))

    # Get audio file path and transcript for each sample
    samples = []
    glob_dir = os.path.join(ogg_root_dir, '**/*.ogg')
    for record in glob(glob_dir, recursive=True):
        record_file = record.replace(ogg_root_dir + os.path.sep, '')
        samples.append((record_file, os.path.splitext(os.path.basename(record_file))[0]))

    # Keep track of how many samples are good vs. problematic
    counter = {'all': 0, 'failed': 0, 'invalid_label': 0, 'too_short': 0, 'too_long': 0}
    lock = RLock()
    num_samples = len(samples)
    rows = []

    def one_sample(sample):
        """ Take an audio file, and optionally convert it to 16kHz WAV """
        ogg_filename = path.join(ogg_root_dir, sample[0])
        # Storing wav files next to the ogg ones - just with a different suffix
        wav_filename = path.splitext(ogg_filename)[0] + ".wav"
        _maybe_convert_wav(ogg_filename, wav_filename)
        file_size = -1
        frames = 0
        if path.exists(wav_filename):
            file_size = path.getsize(wav_filename)
            frames = int(subprocess.check_output(['soxi', '-s', wav_filename],
                                                 stderr=subprocess.STDOUT))
        label = label_filter(sample[1])
        with lock:
            if file_size == -1:
                # Excluding samples that failed upon conversion
                counter['failed'] += 1
            elif label is None:
                # Excluding samples that failed on label validation
                counter['invalid_label'] += 1
            elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)):
                # Excluding samples that are too short to fit the transcript
                counter['too_short'] += 1
            elif frames / SAMPLE_RATE > MAX_SECS:
                # Excluding very long samples to keep a reasonable batch-size
                counter['too_long'] += 1
            else:
                # This one is good - keep it for the target CSV
                rows.append((wav_filename, file_size, label))
            counter['all'] += 1

    print("Importing ogg files...")
    pool = Pool(cpu_count())
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=1):
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format('train'), 'w') as train_csv_file:  # 80%
        with open(target_csv_template.format('dev'), 'w') as dev_csv_file:  # 10%
            with open(target_csv_template.format('test'), 'w') as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
                test_writer.writeheader()
                for i, item in enumerate(rows):
                    transcript = validate_label(item[2])
                    if not transcript:
                        continue
                    wav_filename = os.path.join(ogg_root_dir, item[0].replace('.ogg', '.wav'))
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(dict(
                        wav_filename=wav_filename,
                        wav_filesize=os.path.getsize(wav_filename),
                        transcript=transcript,
                    ))

    print('Imported %d samples.' % (counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long']))
    if counter['failed'] > 0:
        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
    if counter['invalid_label'] > 0:
        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
    if counter['too_short'] > 0:
        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
    if counter['too_long'] > 0:
        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
def _maybe_convert_sets(target_dir, extracted_data):
    extracted_dir = path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(target_dir, ARCHIVE_DIR_NAME, ARCHIVE_NAME.replace('.tar.gz', '_{}.csv'))
    # the template still contains '{}', so test a concrete filename
    if os.path.isfile(target_csv_template.format('train')):
        return
    wav_root_dir = os.path.join(extracted_dir)

    all_files = [
        'transcripts/train/yaounde/fn_text.txt',
        'transcripts/train/ca16_conv/transcripts.txt',
        'transcripts/train/ca16_read/conditioned.txt',
        'transcripts/dev/niger_west_african_fr/transcripts.txt',
        'speech/dev/niger_west_african_fr/niger_wav_file_name_transcript.tsv',
        'transcripts/devtest/ca16_read/conditioned.txt',
        'transcripts/test/ca16/prompts.txt',
    ]

    transcripts = {}
    for tr in all_files:
        with open(os.path.join(target_dir, ARCHIVE_DIR_NAME, tr), 'r') as tr_source:
            for line in tr_source.readlines():
                line = line.strip()
                # .tsv files are tab-separated, the rest are space-separated
                if '.tsv' in tr:
                    sep = '\t'
                else:
                    sep = ' '
                audio = os.path.basename(line.split(sep)[0])
                if '.wav' not in audio:
                    if '.tdf' in audio:
                        audio = audio.replace('.tdf', '.wav')
                    else:
                        audio += '.wav'
                transcript = ' '.join(line.split(sep)[1:])
                transcripts[audio] = transcript

    # Get audio file path and transcript for each sample
    samples = []
    glob_dir = os.path.join(wav_root_dir, '**/*.wav')
    for record in glob(glob_dir, recursive=True):
        record_file = os.path.basename(record)
        if record_file in transcripts:
            samples.append((record, transcripts[record_file]))

    # Keep track of how many samples are good vs. problematic
    counter = {'all': 0, 'failed': 0, 'invalid_label': 0,
               'too_short': 0, 'too_long': 0, 'total_time': 0}
    lock = RLock()
    num_samples = len(samples)
    rows = []

    def one_sample(sample):
        """ Take an audio file, and optionally convert it to 16kHz WAV """
        wav_filename = sample[0]
        file_size = -1
        frames = 0
        if path.exists(wav_filename):
            file_size = path.getsize(wav_filename)
            frames = int(subprocess.check_output(['soxi', '-s', wav_filename],
                                                 stderr=subprocess.STDOUT))
        label = label_filter(sample[1])
        with lock:
            if file_size == -1:
                # Excluding samples that failed upon conversion
                counter['failed'] += 1
            elif label is None:
                # Excluding samples that failed on label validation
                counter['invalid_label'] += 1
            elif int(frames / SAMPLE_RATE * 1000 / 15 / 2) < len(str(label)):
                # Excluding samples that are too short to fit the transcript
                counter['too_short'] += 1
            elif frames / SAMPLE_RATE > MAX_SECS:
                # Excluding very long samples to keep a reasonable batch-size
                counter['too_long'] += 1
            else:
                # This one is good - keep it for the target CSV
                rows.append((wav_filename, file_size, label))
            counter['all'] += 1
            counter['total_time'] += frames

    print("Importing WAV files...")
    pool = Pool(cpu_count())
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=1):
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format('train'), 'w') as train_csv_file:  # 80%
        with open(target_csv_template.format('dev'), 'w') as dev_csv_file:  # 10%
            with open(target_csv_template.format('test'), 'w') as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
                test_writer.writeheader()
                for i, item in enumerate(rows):
                    transcript = validate_label(item[2])
                    if not transcript:
                        continue
                    wav_filename = item[0]
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(dict(
                        wav_filename=wav_filename,
                        wav_filesize=os.path.getsize(wav_filename),
                        transcript=transcript,
                    ))

    print('Imported %d samples.' % (counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long']))
    if counter['failed'] > 0:
        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
    if counter['invalid_label'] > 0:
        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
    if counter['too_short'] > 0:
        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
    if counter['too_long'] > 0:
        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
    print('Final amount of imported audio: %s.' % secs_to_hours(counter['total_time'] / SAMPLE_RATE))
def _maybe_convert_sets(target_dir, extracted_data, english_compatible=False):
    extracted_dir = path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(target_dir, 'ts_' + ARCHIVE_NAME + '_{}.csv')
    # the template still contains '{}', so test a concrete filename
    if os.path.isfile(target_csv_template.format('train')):
        return
    path_to_original_csv = os.path.join(extracted_dir, 'data.csv')
    with open(path_to_original_csv) as csv_f:
        data = [d for d in csv.DictReader(csv_f, delimiter=',')
                if float(d['duration']) <= MAX_SECS]

    # Keep track of how many samples are good vs. problematic
    counter = {'all': 0, 'failed': 0, 'invalid_label': 0,
               'too_short': 0, 'too_long': 0, 'total_time': 0}
    lock = RLock()
    num_samples = len(data)
    rows = []
    wav_root_dir = extracted_dir

    def one_sample(sample):
        """ Take an audio file, and optionally convert it to 16kHz WAV """
        orig_filename = path.join(wav_root_dir, sample['path'])
        # Storing converted files next to the original ones - just with a different suffix
        wav_filename = path.splitext(orig_filename)[0] + ".converted.wav"
        _maybe_convert_wav(orig_filename, wav_filename)
        file_size = -1
        frames = 0
        if path.exists(wav_filename):
            file_size = path.getsize(wav_filename)
            frames = int(subprocess.check_output(['soxi', '-s', wav_filename],
                                                 stderr=subprocess.STDOUT))
        label = sample['text']
        with lock:
            if file_size == -1:
                # Excluding samples that failed upon conversion
                counter['failed'] += 1
            elif label is None:
                # Excluding samples that failed on label validation
                counter['invalid_label'] += 1
            elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)):
                # Excluding samples that are too short to fit the transcript
                counter['too_short'] += 1
            elif frames / SAMPLE_RATE > MAX_SECS:
                # Excluding very long samples to keep a reasonable batch-size
                counter['too_long'] += 1
            else:
                # This one is good - keep it for the target CSV
                rows.append((wav_filename, file_size, label))
            counter['all'] += 1
            counter['total_time'] += frames

    print("Importing wav files...")
    pool = Pool(cpu_count())
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, _ in enumerate(pool.imap_unordered(one_sample, data), start=1):
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format('train'), 'w') as train_csv_file:  # 80%
        with open(target_csv_template.format('dev'), 'w') as dev_csv_file:  # 10%
            with open(target_csv_template.format('test'), 'w') as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
                test_writer.writeheader()
                for i, item in enumerate(rows):
                    transcript = validate_label(
                        cleanup_transcript(item[2], english_compatible=english_compatible))
                    if not transcript:
                        continue
                    wav_filename = os.path.join(target_dir, extracted_data, item[0])
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(dict(
                        wav_filename=wav_filename,
                        wav_filesize=os.path.getsize(wav_filename),
                        transcript=transcript,
                    ))

    print('Imported %d samples.' % (counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long']))
    if counter['failed'] > 0:
        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
    if counter['invalid_label'] > 0:
        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
    if counter['too_short'] > 0:
        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
    if counter['too_long'] > 0:
        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
    print('Final amount of imported audio: %s.' % secs_to_hours(counter['total_time'] / SAMPLE_RATE))
def _maybe_convert_sets(target_dir, extracted_data):
    extracted_dir = path.join(target_dir, extracted_data)
    # override existing CSV with normalized one
    target_csv_template = os.path.join(target_dir, ARCHIVE_DIR_NAME + '_' + ARCHIVE_NAME.replace('.zip', '_{}.csv'))
    # the template still contains '{}', so test a concrete filename
    if os.path.isfile(target_csv_template.format('train')):
        return
    ogg_root_dir = os.path.join(extracted_dir, ARCHIVE_NAME.replace('.zip', ''))

    # Get audio file path and transcript for each sample
    samples = []
    glob_dir = os.path.join(ogg_root_dir, '**/*.ogg')
    for record in glob(glob_dir, recursive=True):
        record_file = record.replace(ogg_root_dir + os.path.sep, '')
        if record_filter(record_file):
            samples.append((record_file, os.path.splitext(os.path.basename(record_file))[0]))

    # Keep track of how many samples are good vs. problematic
    counter = {'all': 0, 'failed': 0, 'invalid_label': 0,
               'too_short': 0, 'too_long': 0, 'total_time': 0}
    lock = RLock()
    num_samples = len(samples)
    rows = []

    def one_sample(sample):
        """ Take an audio file, and optionally convert it to 16kHz WAV """
        ogg_filename = path.join(ogg_root_dir, sample[0])
        # Storing wav files next to the ogg ones - just with a different suffix
        wav_filename = path.splitext(ogg_filename)[0] + ".wav"
        _maybe_convert_wav(ogg_filename, wav_filename)
        file_size = -1
        frames = 0
        if path.exists(wav_filename):
            file_size = path.getsize(wav_filename)
            frames = int(subprocess.check_output(['soxi', '-s', wav_filename],
                                                 stderr=subprocess.STDOUT))
        label = label_filter(sample[1])
        with lock:
            if file_size == -1:
                # Excluding samples that failed upon conversion
                counter['failed'] += 1
            elif label is None:
                # Excluding samples that failed on label validation
                counter['invalid_label'] += 1
            elif int(frames / SAMPLE_RATE * 1000 / 10 / 2) < len(str(label)):
                # Excluding samples that are too short to fit the transcript
                counter['too_short'] += 1
            elif frames / SAMPLE_RATE > MAX_SECS:
                # Excluding very long samples to keep a reasonable batch-size
                counter['too_long'] += 1
            else:
                # This one is good - keep it for the target CSV
                rows.append((wav_filename, file_size, label))
            counter['all'] += 1
            counter['total_time'] += frames

    print("Importing ogg files...")
    pool = Pool(cpu_count())
    bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=1):
        bar.update(i)
    bar.update(num_samples)
    pool.close()
    pool.join()

    with open(target_csv_template.format('train'), 'w') as train_csv_file:  # 80%
        with open(target_csv_template.format('dev'), 'w') as dev_csv_file:  # 10%
            with open(target_csv_template.format('test'), 'w') as test_csv_file:  # 10%
                train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                train_writer.writeheader()
                dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
                dev_writer.writeheader()
                test_writer = csv.DictWriter(test_csv_file, fieldnames=FIELDNAMES)
                test_writer.writeheader()
                for i, item in enumerate(rows):
                    transcript = validate_label(item[2])
                    if not transcript:
                        continue
                    wav_filename = os.path.join(ogg_root_dir, item[0].replace('.ogg', '.wav'))
                    i_mod = i % 10
                    if i_mod == 0:
                        writer = test_writer
                    elif i_mod == 1:
                        writer = dev_writer
                    else:
                        writer = train_writer
                    writer.writerow(dict(
                        wav_filename=wav_filename,
                        wav_filesize=os.path.getsize(wav_filename),
                        transcript=transcript,
                    ))

    print('Imported %d samples.' % (counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long']))
    if counter['failed'] > 0:
        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
    if counter['invalid_label'] > 0:
        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
    if counter['too_short'] > 0:
        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
    if counter['too_long'] > 0:
        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
    print('Final amount of imported audio: %s.' % secs_to_hours(counter['total_time'] / SAMPLE_RATE))