def hyp(input_path, output_path, device, batch_size, html, ext, sample_rate, max_duration):
    os.makedirs(output_path, exist_ok=True)
    audio_source = ([(input_path, audio_name) for audio_name in os.listdir(input_path)]
                    if os.path.isdir(input_path) else
                    [(os.path.dirname(input_path), os.path.basename(input_path))])
    model = PyannoteDiarizationModel(device=device, batch_size=batch_size)
    for i, (input_path, audio_name) in enumerate(audio_source):
        print(i, '/', len(audio_source), audio_name)
        audio_path = os.path.join(input_path, audio_name)
        noextname = audio_name[:-len(ext)]
        transcript_path = os.path.join(output_path, noextname + '.json')
        rttm_path = os.path.join(output_path, noextname + '.rttm')

        signal, sample_rate = audio.read_audio(audio_path, sample_rate=sample_rate, mono=True, dtype='float32', duration=max_duration)

        transcript = model(signal, sample_rate=sample_rate, extra=dict(audio_path=audio_path))
        transcripts.collect_speaker_names(transcript, set_speaker_data=True)

        transcripts.save(transcript_path, transcript)
        print(transcript_path)
        transcripts.save(rttm_path, transcript)
        print(rttm_path)

        if html:
            html_path = os.path.join(output_path, audio_name + '.html')
            vis.transcript(html_path, sample_rate=sample_rate, mono=True, transcript=transcript, duration=max_duration)
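
# Illustrative usage (not part of the original script): running hyp() over a directory of
# audio files. All argument values below are placeholders / assumptions, not defaults taken
# from this repo.
def _example_hyp_usage():
    hyp(
        input_path='data/calls',        # directory of audio files, or a path to a single file
        output_path='out/diarization',  # <name>.json, <name>.rttm (and .html when html=True) are written here
        device='cuda',
        batch_size=8,
        html=True,
        ext='.wav',                     # suffix stripped from the file name to build output names
        sample_rate=8000,               # assumed target sample rate passed to audio.read_audio
        max_duration=None)              # assumed to mean "process the full recording"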
def main(args, ext_json=['.json', '.json.gz']):
    utils.enable_jit_fusion()

    assert args.output_json or args.output_html or args.output_txt or args.output_csv, \
        'at least one of the output formats must be provided'
    os.makedirs(args.output_path, exist_ok=True)

    audio_data_paths = set(
        p for f in args.input_path
        for p in ([os.path.join(f, g) for g in os.listdir(f)] if os.path.isdir(f) else [f])
        if os.path.isfile(p) and any(map(p.endswith, args.ext)))
    json_data_paths = set(
        p for p in args.input_path
        if any(map(p.endswith, ext_json)) and not utils.strip_suffixes(p, ext_json) in audio_data_paths)
    data_paths = list(audio_data_paths | json_data_paths)

    exclude = set([
        os.path.splitext(basename)[0]
        for basename in os.listdir(args.output_path)
        if basename.endswith('.json')
    ]) if args.skip_processed else None
    data_paths = [path for path in data_paths if exclude is None or os.path.basename(path) not in exclude]

    text_pipeline, frontend, model, generator = setup(args)
    val_dataset = datasets.AudioTextDataset(
        data_paths, [text_pipeline],
        args.sample_rate,
        frontend=frontend if not args.frontend_in_model else None,
        mono=args.mono,
        time_padding_multiple=args.batch_time_padding_multiple,
        audio_backend=args.audio_backend,
        exclude=exclude,
        max_duration=args.transcribe_first_n_sec,
        mode='batched_channels' if args.join_transcript else 'batched_transcript',
        string_array_encoding=args.dataset_string_array_encoding,
        debug_short_long_records_features_from_whole_normalized_signal=args.debug_short_long_records_features_from_whole_normalized_signal)
    print('Examples count: ', len(val_dataset))
    val_meta = val_dataset.pop_meta()
    val_data_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=None, collate_fn=val_dataset.collate_fn, num_workers=args.num_workers)
    csv_sep = dict(tab='\t', comma=',')[args.csv_sep]
    csv_lines = []  # only used if args.output_csv is True

    oom_handler = utils.OomHandler(max_retries=args.oom_retries)
    for i, (meta, s, x, xlen, y, ylen) in enumerate(val_data_loader):
        print(f'Processing: {i}/{len(val_dataset)}')
        meta = [val_meta[t['example_id']] for t in meta]
        audio_path = meta[0]['audio_path']
        audio_name = transcripts.audio_name(audio_path)
        begin_end = [dict(begin=t['begin'], end=t['end']) for t in meta]
        begin = torch.tensor([t['begin'] for t in begin_end], dtype=torch.float)
        end = torch.tensor([t['end'] for t in begin_end], dtype=torch.float)

        # TODO WARNING: assumes frontend not in dataset
        if not args.frontend_in_model:
            print('\n' * 10 + 'WARNING\n' * 5)
            print('transcribe.py assumes the frontend is in the model, otherwise time alignment will be incorrect')
            print('WARNING\n' * 5 + '\n')

        duration = x.shape[-1] / args.sample_rate
        channel = [t['channel'] for t in meta]
        speaker = [t['speaker'] for t in meta]
        speaker_name = [t['speaker_name'] for t in meta]

        if x.numel() == 0:
            print(f'Skipping empty [{audio_path}].')
            continue

        try:
            tic = time.time()
            y, ylen = y.to(args.device), ylen.to(args.device)
            log_probs, logits, olen = model(x.squeeze(1).to(args.device), xlen.to(args.device))

            print('Input:', audio_name)
            print('Input time steps:', log_probs.shape[-1], '| target time steps:', y.shape[-1])
            print('Time: audio {audio:.02f} sec | processing {processing:.02f} sec'.format(
                audio=sum(map(transcripts.compute_duration, meta)), processing=time.time() - tic))

            ts: shaping.Bt = duration * torch.linspace(
                0, 1, steps=log_probs.shape[-1], device=log_probs.device).unsqueeze(0).expand(x.shape[0], -1)

            ref_segments = [[
                dict(
                    channel=channel[i],
                    begin=begin_end[i]['begin'],
                    end=begin_end[i]['end'],
                    ref=text_pipeline.postprocess(text_pipeline.preprocess(meta[i]['ref'])))
            ] for i in range(len(meta))]
            hyp_segments = [
                alternatives[0] for alternatives in generator.generate(
                    tokenizer=text_pipeline.tokenizer,
                    log_probs=log_probs,
                    begin=begin,
                    end=end,
                    output_lengths=olen,
                    time_stamps=ts,
                    segment_text_key='hyp',
                    segment_extra_info=[
                        dict(speaker=s, speaker_name=sn, channel=c)
                        for s, sn, c in zip(speaker, speaker_name, channel)
                    ])
            ]
            hyp_segments = [transcripts.map_text(text_pipeline.postprocess, hyp=hyp) for hyp in hyp_segments]
            hyp, ref = '\n'.join(transcripts.join(hyp=h) for h in hyp_segments).strip(), \
                '\n'.join(transcripts.join(ref=r) for r in ref_segments).strip()
            if args.verbose:
                print('HYP:', hyp)
            print('CER: {cer:.02%}'.format(cer=metrics.cer(hyp=hyp, ref=ref)))

            tic_alignment = time.time()
            if args.align and y.numel() > 0:
                alignment: shaping.BY = ctc.alignment(
                    log_probs.permute(2, 0, 1),
                    y[:, 0, :],  # assumed that channel 0 is char labels
                    olen,
                    ylen[:, 0],
                    blank=text_pipeline.tokenizer.eps_id,
                    pack_backpointers=args.pack_backpointers)
                aligned_ts: shaping.Bt = ts.gather(1, alignment)
                ref_segments = [
                    alternatives[0] for alternatives in generator.generate(
                        tokenizer=text_pipeline.tokenizer,
                        log_probs=torch.nn.functional.one_hot(y[:, 0, :], num_classes=log_probs.shape[1]).permute(0, 2, 1),
                        begin=begin,
                        end=end,
                        output_lengths=ylen,
                        time_stamps=aligned_ts,
                        segment_text_key='ref',
                        segment_extra_info=[
                            dict(speaker=s, speaker_name=sn, channel=c)
                            for s, sn, c in zip(speaker, speaker_name, channel)
                        ])
                ]
                ref_segments = [transcripts.map_text(text_pipeline.postprocess, hyp=ref) for ref in ref_segments]
            oom_handler.reset()
        except:
            # expected to be an out-of-memory error: try to recover and skip the batch, otherwise re-raise
            if oom_handler.try_recover(model.parameters()):
                print(f'Skipping {i} / {len(val_dataset)}')
                continue
            else:
                raise

        print('Alignment time: {:.02f} sec'.format(time.time() - tic_alignment))

        ref_transcript, hyp_transcript = [
            sorted(transcripts.flatten(segments), key=transcripts.sort_key)
            for segments in [ref_segments, hyp_segments]
        ]

        if args.max_segment_duration:
            if ref:
                ref_segments = list(transcripts.segment_by_time(ref_transcript, args.max_segment_duration))
                hyp_segments = list(transcripts.segment_by_ref(hyp_transcript, ref_segments))
            else:
                hyp_segments = list(transcripts.segment_by_time(hyp_transcript, args.max_segment_duration))
                ref_segments = [[] for _ in hyp_segments]

        #### HACK for diarization
        elif args.ref_transcript_path and args.join_transcript:
            audio_name_hack = audio_name.split('.')[0]
            # TODO: normalize ref field
            ref_segments = [[t] for t in sorted(
                transcripts.load(os.path.join(args.ref_transcript_path, audio_name_hack + '.json')),
                key=transcripts.sort_key)]
            hyp_segments = list(transcripts.segment_by_ref(hyp_transcript, ref_segments, set_speaker=True, soft=False))
        #### END OF HACK

        has_ref = bool(transcripts.join(ref=transcripts.flatten(ref_segments)))

        transcript = []
        for hyp_transcript, ref_transcript in zip(hyp_segments, ref_segments):
            hyp, ref = transcripts.join(hyp=hyp_transcript), transcripts.join(ref=ref_transcript)
            transcript.append(
                dict(
                    audio_path=audio_path,
                    ref=ref,
                    hyp=hyp,
                    speaker_name=transcripts.speaker_name(ref=ref_transcript, hyp=hyp_transcript),
                    words=metrics.align_words(*metrics.align_strings(hyp=hyp, ref=ref)) if args.align_words else [],
                    words_ref=ref_transcript,
                    words_hyp=hyp_transcript,
                    **transcripts.summary(hyp_transcript),
                    **(dict(cer=metrics.cer(hyp=hyp, ref=ref)) if has_ref else {})))
        transcripts.collect_speaker_names(transcript, set_speaker_data=True, num_speakers=2)

        filtered_transcript = list(
            transcripts.prune(
                transcript,
                align_boundary_words=args.align_boundary_words,
                cer=args.prune_cer,
                duration=args.prune_duration,
                gap=args.prune_gap,
                allowed_unk_count=args.prune_unk,
                num_speakers=args.prune_num_speakers))

        print('Filtered segments:', len(filtered_transcript), 'out of', len(transcript))

        if args.output_json:
            transcript_path = os.path.join(args.output_path, audio_name + '.json')
            print(transcripts.save(transcript_path, filtered_transcript))

        if args.output_html:
            transcript_path = os.path.join(args.output_path, audio_name + '.html')
            print(vis.transcript(transcript_path, args.sample_rate, args.mono, transcript, filtered_transcript))

        if args.output_txt:
            transcript_path = os.path.join(args.output_path, audio_name + '.txt')
            with open(transcript_path, 'w') as f:
                f.write(' '.join(t['hyp'].strip() for t in filtered_transcript))
            print(transcript_path)

        if args.output_csv:
            assert len({t['audio_path'] for t in filtered_transcript}) == 1
            audio_path = filtered_transcript[0]['audio_path']
            hyp = ' '.join(t['hyp'].strip() for t in filtered_transcript)
            begin = min(t['begin'] for t in filtered_transcript)
            end = max(t['end'] for t in filtered_transcript)
            csv_lines.append(csv_sep.join([audio_path, hyp, str(begin), str(end)]))

        if args.logits:
            logits_file_path = os.path.join(args.output_path, audio_name + '.pt')
            if args.logits_crop:
                begin_end = [
                    dict(zip(['begin', 'end'], [t['begin'] + c / float(o) * (t['end'] - t['begin']) for c in args.logits_crop]))
                    for o, t in zip(olen, begin_end)
                ]
                logits_crop = [slice(*args.logits_crop) for o in olen]
            else:
                logits_crop = [slice(int(o)) for o in olen]

            # TODO: filter ref / hyp by channel?
            torch.save([
                dict(audio_path=audio_path, logits=l[..., logits_crop[i]], **begin_end[i], ref=ref, hyp=hyp)
                for i, l in enumerate(logits.cpu())
            ], logits_file_path)
            print(logits_file_path)

        print('Done: {:.02f} sec\n'.format(time.time() - tic))

    if args.output_csv:
        transcript_path = os.path.join(args.output_path, 'transcripts.csv')
        with open(transcript_path, 'w') as f:
            f.write('\n'.join(csv_lines))
        print(transcript_path)
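
# Illustrative sketch (not part of the original script): reading back the transcripts.csv
# written by main() above. It assumes the layout produced there: one line per audio, with
# audio_path, hyp, begin, end joined by the chosen csv_sep (tab or comma) and no header row.
def _read_transcripts_csv(path, sep='\t'):
    rows = []
    with open(path) as f:
        for line in f:
            audio_path, hyp, begin, end = line.rstrip('\n').split(sep)
            rows.append(dict(audio_path=audio_path, hyp=hyp, begin=float(begin), end=float(end)))
    return rows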
def main(args):
    utils.enable_jit_fusion()

    assert args.output_json or args.output_html or args.output_txt or args.output_csv, \
        'at least one of the output formats must be provided'
    os.makedirs(args.output_path, exist_ok=True)

    data_paths = [
        p for f in args.input_path
        for p in ([os.path.join(f, g) for g in os.listdir(f)] if os.path.isdir(f) else [f])
        if os.path.isfile(p) and any(map(p.endswith, args.ext))
    ] + [p for p in args.input_path if any(map(p.endswith, ['.json', '.json.gz']))]
    exclude = set([
        os.path.splitext(basename)[0]
        for basename in os.listdir(args.output_path)
        if basename.endswith('.json')
    ] if args.skip_processed else [])
    data_paths = [path for path in data_paths if os.path.basename(path) not in exclude]

    labels, frontend, model, decoder = setup(args)
    val_dataset = datasets.AudioTextDataset(
        data_paths, [labels],
        args.sample_rate,
        frontend=None,
        segmented=True,
        mono=args.mono,
        time_padding_multiple=args.batch_time_padding_multiple,
        audio_backend=args.audio_backend,
        exclude=exclude,
        max_duration=args.transcribe_first_n_sec,
        join_transcript=args.join_transcript,
        string_array_encoding=args.dataset_string_array_encoding)
    num_examples = len(val_dataset)
    print('Examples count: ', num_examples)
    val_data_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=None, collate_fn=val_dataset.collate_fn, num_workers=args.num_workers)
    csv_sep = dict(tab='\t', comma=',')[args.csv_sep]
    output_lines = []  # only used if args.output_csv is True

    oom_handler = utils.OomHandler(max_retries=args.oom_retries)
    for i, (meta, s, x, xlen, y, ylen) in enumerate(val_data_loader):
        print(f'Processing: {i}/{num_examples}')
        meta = [val_dataset.meta.get(m['example_id']) for m in meta]
        audio_path = meta[0]['audio_path']
        if x.numel() == 0:
            print(f'Skipping empty [{audio_path}].')
            continue

        begin = meta[0]['begin']
        end = meta[0]['end']
        audio_name = transcripts.audio_name(audio_path)

        try:
            tic = time.time()
            y, ylen = y.to(args.device), ylen.to(args.device)
            log_probs, olen = model(x.squeeze(1).to(args.device), xlen.to(args.device))

            #speech = vad.detect_speech(x.squeeze(1), args.sample_rate, args.window_size, aggressiveness = args.vad, window_size_dilate = args.window_size_dilate)
            #speech = vad.upsample(speech, log_probs)
            #log_probs.masked_fill_(models.silence_space_mask(log_probs, speech, space_idx = labels.space_idx, blank_idx = labels.blank_idx), float('-inf'))

            decoded = decoder.decode(log_probs, olen)

            print('Input:', audio_name)
            print('Input time steps:', log_probs.shape[-1], '| target time steps:', y.shape[-1])
            print('Time: audio {audio:.02f} sec | processing {processing:.02f} sec'.format(
                audio=sum(transcripts.compute_duration(t) for t in meta), processing=time.time() - tic))

            ts = (x.shape[-1] / args.sample_rate) * torch.linspace(0, 1, steps=log_probs.shape[-1]).unsqueeze(0) \
                + torch.FloatTensor([t['begin'] for t in meta]).unsqueeze(1)

            channel = [t['channel'] for t in meta]
            speaker = [t['speaker'] for t in meta]

            ref_segments = [[
                dict(
                    channel=channel[i],
                    begin=meta[i]['begin'],
                    end=meta[i]['end'],
                    ref=labels.decode(y[i, 0, :ylen[i]].tolist()))
            ] for i in range(len(decoded))]
            hyp_segments = [
                labels.decode(
                    decoded[i],
                    ts[i],
                    channel=channel[i],
                    replace_blank=True,
                    replace_blank_series=args.replace_blank_series,
                    replace_repeat=True,
                    replace_space=False,
                    speaker=speaker[i] if isinstance(speaker[i], str) else None) for i in range(len(decoded))
            ]

            ref, hyp = '\n'.join(transcripts.join(ref=r) for r in ref_segments).strip(), \
                '\n'.join(transcripts.join(hyp=h) for h in hyp_segments).strip()
            if args.verbose:
                print('HYP:', hyp)
            print('CER: {cer:.02%}'.format(cer=metrics.cer(hyp=hyp, ref=ref)))

            tic_alignment = time.time()
            if args.align and y.numel() > 0:
                #if ref_full:# and not ref:
                #	#assert len(set(t['channel'] for t in meta)) == 1 or all(t['type'] != 'channel' for t in meta)
                #	#TODO: add space at the end
                #	channel = torch.ByteTensor(channel).repeat_interleave(log_probs.shape[-1]).reshape(1, -1)
                #	ts = ts.reshape(1, -1)
                #	log_probs = log_probs.transpose(0, 1).unsqueeze(0).flatten(start_dim = -2)
                #	olen = torch.tensor([log_probs.shape[-1]], device = log_probs.device, dtype = torch.long)
                #	y = y_full[None, None, :].to(y.device)
                #	ylen = torch.tensor([[y.shape[-1]]], device = log_probs.device, dtype = torch.long)
                #	segments = [([], sum([h for r, h in segments], []))]

                alignment = ctc.alignment(
                    log_probs.permute(2, 0, 1),
                    y.squeeze(1),
                    olen,
                    ylen.squeeze(1),
                    blank=labels.blank_idx,
                    pack_backpointers=args.pack_backpointers)
                ref_segments = [
                    labels.decode(
                        y[i, 0, :ylen[i]].tolist(),
                        ts[i],
                        alignment[i],
                        channel=channel[i],
                        speaker=speaker[i],
                        key='ref',
                        speakers=val_dataset.speakers) for i in range(len(decoded))
                ]
            oom_handler.reset()
        except:
            if oom_handler.try_recover(model.parameters()):
                print(f'Skipping {i} / {num_examples}')
                continue
            else:
                raise

        print('Alignment time: {:.02f} sec'.format(time.time() - tic_alignment))

        if args.max_segment_duration:
            ref_transcript, hyp_transcript = [
                list(sorted(sum(segments, []), key=transcripts.sort_key))
                for segments in [ref_segments, hyp_segments]
            ]
            if ref:
                ref_segments = list(transcripts.segment(ref_transcript, args.max_segment_duration))
                hyp_segments = list(transcripts.segment(hyp_transcript, ref_segments))
            else:
                hyp_segments = list(transcripts.segment(hyp_transcript, args.max_segment_duration))
                ref_segments = [[] for _ in hyp_segments]

        transcript = [
            dict(
                audio_path=audio_path,
                ref=ref,
                hyp=hyp,
                speaker=transcripts.speaker(ref=ref_transcript, hyp=hyp_transcript),
                cer=metrics.cer(hyp=hyp, ref=ref),
                words=metrics.align_words(hyp=hyp, ref=ref)[-1] if args.align_words else [],
                alignment=dict(ref=ref_transcript, hyp=hyp_transcript),
                **transcripts.summary(hyp_transcript))
            for ref_transcript, hyp_transcript in zip(ref_segments, hyp_segments)
            for ref, hyp in [(transcripts.join(ref=ref_transcript), transcripts.join(hyp=hyp_transcript))]
        ]
        filtered_transcript = list(
            transcripts.prune(
                transcript,
                align_boundary_words=args.align_boundary_words,
                cer=args.cer,
                duration=args.duration,
                gap=args.gap,
                unk=args.unk,
                num_speakers=args.num_speakers))

        print('Filtered segments:', len(filtered_transcript), 'out of', len(transcript))

        if args.output_json:
            transcript_path = os.path.join(args.output_path, audio_name + '.json')
            print(transcript_path)
            with open(transcript_path, 'w') as f:
                json.dump(filtered_transcript, f, ensure_ascii=False, sort_keys=True, indent=2)

        if args.output_html:
            transcript_path = os.path.join(args.output_path, audio_name + '.html')
            print(transcript_path)
            vis.transcript(transcript_path, args.sample_rate, args.mono, transcript, filtered_transcript)

        if args.output_txt:
            transcript_path = os.path.join(args.output_path, audio_name + '.txt')
            print(transcript_path)
            with open(transcript_path, 'w') as f:
                f.write(hyp)

        if args.output_csv:
            output_lines.append(csv_sep.join((audio_path, hyp, str(begin), str(end))) + '\n')

        print('Done: {:.02f} sec\n'.format(time.time() - tic))

    if args.output_csv:
        with open(os.path.join(args.output_path, 'transcripts.csv'), 'w') as f:
            f.writelines(output_lines)
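
# Illustrative sketch (not part of the original script): inspecting a per-audio JSON written
# by main() above. Each segment dict carries at least 'hyp', 'ref' and 'cer', so the noisiest
# segments can be listed with the module-level json import alone.
def _worst_segments(transcript_json_path, k=5):
    with open(transcript_json_path) as f:
        transcript = json.load(f)
    return sorted(transcript, key=lambda t: t.get('cer', 0.0), reverse=True)[:k]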
def ref(input_path, output_path, sample_rate, window_size, device, max_duration, debug_audio, html, ext):
    os.makedirs(output_path, exist_ok=True)
    audio_source = ([(input_path, audio_name) for audio_name in os.listdir(input_path)]
                    if os.path.isdir(input_path) else
                    [(os.path.dirname(input_path), os.path.basename(input_path))])
    for i, (input_path, audio_name) in enumerate(audio_source):
        print(i, '/', len(audio_source), audio_name)
        audio_path = os.path.join(input_path, audio_name)
        noextname = audio_name[:-len(ext)]
        transcript_path = os.path.join(output_path, noextname + '.json')
        rttm_path = os.path.join(output_path, noextname + '.rttm')

        signal, sample_rate = audio.read_audio(audio_path, sample_rate=sample_rate, mono=False, dtype='float32', duration=max_duration)

        speaker_id_ref, speaker_id_ref_ = select_speaker(
            signal.to(device),
            silence_absolute_threshold=0.05,
            silence_relative_threshold=0.2,
            kernel_size_smooth_signal=128,
            kernel_size_smooth_speaker=4096,
            kernel_size_smooth_silence=4096)

        transcript = [
            dict(
                audio_path=audio_path,
                begin=float(begin) / sample_rate,
                end=(float(begin) + float(duration)) / sample_rate,
                speaker=speaker,
                speaker_name=transcripts.default_speaker_names[speaker])
            for speaker in range(1, len(speaker_id_ref_))
            for begin, duration, mask in zip(*models.rle1d(speaker_id_ref_[speaker])) if mask == 1
        ]
        #transcript = [dict(audio_path = audio_path, begin = float(begin) / sample_rate, end = (float(begin) + float(duration)) / sample_rate, speaker_name = str(int(speaker)), speaker = int(speaker)) for begin, duration, speaker in zip(*models.rle1d(speaker_id_ref.cpu()))]

        transcript_without_speaker_missing = [t for t in transcript if t['speaker'] != transcripts.speaker_missing]
        transcripts.save(transcript_path, transcript_without_speaker_missing)
        print(transcript_path)
        transcripts.save(rttm_path, transcript_without_speaker_missing)
        print(rttm_path)

        if debug_audio:
            audio.write_audio(
                transcript_path + '.wav',
                torch.cat([
                    signal[..., :speaker_id_ref.shape[-1]],
                    convert_speaker_id(speaker_id_ref[..., :signal.shape[-1]], to_bipole=True).unsqueeze(0).cpu() * 0.5,
                    speaker_id_ref_[..., :signal.shape[-1]].cpu() * 0.5
                ]),
                sample_rate,
                mono=False)
            print(transcript_path + '.wav')

        if html:
            html_path = os.path.join(output_path, audio_name + '.html')
            vis.transcript(html_path, sample_rate=sample_rate, mono=True, transcript=transcript, duration=max_duration)
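
# Illustrative sketch (not part of the original script): a pure-torch run-length encoder that
# mirrors how models.rle1d is used in ref() above, i.e. zip(*rle1d(mask)) yields
# (begin, duration, value) triples. This is an assumed re-implementation for clarity, not the
# repo's actual models.rle1d.
def _rle1d_sketch(x):
    assert x.dim() == 1
    change = torch.ones_like(x, dtype=torch.bool)
    change[1:] = x[1:] != x[:-1]                # True at the first sample of every run
    begins = change.nonzero(as_tuple=False).squeeze(1)
    ends = torch.cat([begins[1:], torch.tensor([len(x)], device=x.device)])
    return begins, ends - begins, x[begins]     # begin indices, run lengths, run values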
audio_source = ([(args.input_path, audio_name) for audio_name in os.listdir(args.input_path)]
                if os.path.isdir(args.input_path) else
                [(os.path.dirname(args.input_path), os.path.basename(args.input_path))])
for i, (input_path, audio_name) in enumerate(audio_source):
    print(i, '/', len(audio_source), audio_name)
    audio_path = os.path.join(input_path, audio_name)
    transcript_path = os.path.join(args.output_path, audio_name + '.json')
    html_path = os.path.join(args.output_path, audio_name + '.html')

    diarization = pipeline(dict(audio=audio_path))
    transcript = [
        dict(audio_path=audio_path, begin=turn.start, end=turn.end, speaker_name=speaker)
        for turn, _, speaker in diarization.itertracks(yield_label=True)
    ]
    speaker_names = [None] + list(set(t['speaker_name'] for t in transcript))
    for t in transcript:
        t['speaker'] = speaker_names.index(t['speaker_name'])

    json.dump(transcript, open(transcript_path, 'w'), indent=2, sort_keys=True)
    vis.transcript(html_path, sample_rate=args.sample_rate, mono=True, transcript=transcript_path)
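
# Illustrative sketch (not part of the original script): one way the `pipeline` object used
# above could be constructed with pyannote.audio. The model name and pyannote version are
# assumptions; the actual construction of `pipeline` is not shown in this file.
#
#   from pyannote.audio import Pipeline
#   pipeline = Pipeline.from_pretrained('pyannote/speaker-diarization')
#   diarization = pipeline(dict(audio='example.wav'))
#   for turn, _, speaker in diarization.itertracks(yield_label=True):
#       print(f'{speaker}: {turn.start:.2f}-{turn.end:.2f}')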