def test_stim_history_tracking():
    """A chain of converters should accumulate a readable transformation history."""
    video = VideoStim(join(get_test_data_path(), 'video', 'obama_speech.mp4'))
    # A freshly-loaded stim carries no history at all.
    assert video.history is None
    audio = VideoToAudioConverter().transform(video)
    assert str(audio.history) == 'VideoStim->VideoToAudioConverter/AudioStim'
    text = WitTranscriptionConverter().transform(audio)
    expected = ('VideoStim->VideoToAudioConverter/AudioStim'
                '->WitTranscriptionConverter/ComplexTextStim')
    assert str(text.history) == expected
def test_stim_history_tracking():
    """History strings grow step by step as stims pass through converters."""
    path = join(get_test_data_path(), 'video', 'obama_speech.mp4')
    stim = VideoStim(path)
    # Before any transform, no history exists.
    assert stim.history is None
    stim = VideoToAudioConverter().transform(stim)
    step_one = 'VideoStim->VideoToAudioConverter/AudioStim'
    assert str(stim.history) == step_one
    stim = WitTranscriptionConverter().transform(stim)
    step_two = step_one + '->WitTranscriptionConverter/ComplexTextStim'
    assert str(stim.history) == step_two
def clean_transcript(input_transcript, input_media, onset=None, offset=None):
    """Normalize a transcript (SRT or plain text) into a cleaned text file.

    Parameters
    ----------
    input_transcript : str
        Path to the transcript; '.srt' files are split per subtitle element,
        anything else is treated as one block of text.
    input_media : str
        Path to the media file the transcript describes. If it is not already
        audio, the audio track is extracted to /tmp/input_audio.wav.
    onset, offset : float, optional
        Required for plain-text transcripts: the onset of the text and the
        trailing offset subtracted from the media duration.

    Returns
    -------
    tuple of (str, str)
        Paths to the cleaned transcript and the (possibly extracted) audio.

    Raises
    ------
    Exception
        If a plain-text transcript is given without onset and offset.
    """
    stim = load_stims([input_media])[0]
    if not isinstance(stim, AudioStim):
        # Downstream alignment needs audio; extract the track from video.
        conv = VideoToAudioConverter()
        stim = conv.transform(stim)
        input_media = '/tmp/input_audio.wav'
        stim.save(input_media)
    _, extension = splitext(input_transcript)
    clean_transcript = '/tmp/clean_transcript.txt'
    with open(clean_transcript, 'w') as new_file:
        # splitext() returns the extension WITH its leading dot (e.g. '.srt'),
        # so the original `extension == 'srt'` check could never match and SRT
        # files were silently mishandled as plain text.
        if extension.lower() == '.srt':
            txt = ComplexTextStim(input_transcript)
            for el in txt.elements:
                _clean_save(el.text, new_file, el.onset, el.duration)
        else:  # Treat as a single block of text
            if onset is None or offset is None:
                raise Exception("Onset and offset must be declared")
            txt = TextStim(input_transcript)
            _clean_save(txt.text, new_file, onset, stim.duration - offset)
    return clean_transcript, input_media
def test_video_to_audio_converter():
    """Extracting audio from a video preserves provenance and duration."""
    filename = join(get_test_data_path(), 'video', 'small.mp4')
    video = VideoStim(filename)
    audio = VideoToAudioConverter().transform(video)
    history = audio.history
    assert history.source_class == 'VideoStim'
    assert history.source_file == filename
    # Durations should agree to within the 1e-2 relative tolerance.
    assert np.isclose(video.duration, audio.duration, 1e-2)
def test_video_to_audio_converter():
    """Audio extracted from a video keeps its onset, provenance and duration."""
    path = join(VIDEO_DIR, 'small.mp4')
    clip = VideoStim(path, onset=4.2)
    audio = VideoToAudioConverter().transform(clip)
    # Provenance must point back at the source video file and class.
    assert audio.history.source_class == 'VideoStim'
    assert audio.history.source_file == path
    # The onset set on the video carries over to the extracted audio.
    assert audio.onset == 4.2
    # Durations should agree to within the 1e-2 relative tolerance.
    assert np.isclose(clip.duration, audio.duration, 1e-2)
def test_video_to_audio_converter():
    """Audio extraction keeps onset, provenance, sampling rate and duration.

    Exercises two clips with different onsets and native sampling rates.
    """
    conv = VideoToAudioConverter()
    cases = [
        ('small.mp4', 4.2, 48000),
        ('obama_speech.mp4', 1.0, 24000),
    ]
    for basename, onset, rate in cases:
        path = join(VIDEO_DIR, basename)
        clip = VideoStim(path, onset=onset)
        audio = conv.transform(clip)
        assert audio.history.source_class == 'VideoStim'
        assert audio.history.source_file == path
        assert audio.onset == onset
        # The converter should preserve the track's native sampling rate.
        assert audio.sampling_rate == rate
        # Durations should agree to within the 1e-2 relative tolerance.
        assert np.isclose(clip.duration, audio.duration, 1e-2)