Example 1
def test_witaiAPI_converter():
    audio_dir = join(get_test_data_path(), 'audio')
    stim = AudioStim(join(audio_dir, 'homer.wav'))
    conv = WitTranscriptionConverter()
    out_stim = conv.transform(stim)
    assert type(out_stim) == ComplexTextStim
    first_word = next(w for w in out_stim)
    assert type(first_word) == TextStim
    # assert '_' in first_word.name
    text = [elem.text for elem in out_stim]
    assert 'thermodynamics' in text or 'obey' in text
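These snippets are lifted from the pliers test suite and omit their imports. A plausible shared header that would make them runnable, assuming pliers' documented module layout and its bundled test helpers, is:

    import os
    from os.path import join, exists
    import tempfile

    import pytest

    from pliers.stimuli import (AudioStim, VideoStim, ComplexTextStim,
                                TextStim)
    from pliers.converters import (WitTranscriptionConverter,
                                   TesseractConverter, VideoToAudioConverter)
    from pliers.extractors import (LengthExtractor, VibranceExtractor,
                                   BrightnessExtractor, merge_results)
    from pliers.filters import FrameSamplingFilter
    from pliers.graph import Graph
    from pliers.tests.utils import get_test_data_path

    # AUDIO_DIR, referenced in several snippets, is assumed to point at the
    # bundled test audio:
    AUDIO_DIR = join(get_test_data_path(), 'audio')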
Example 2
def test_WitTranscriptionConverter():
    stim = AudioStim(join(AUDIO_DIR, 'homer.wav'), onset=4.2)
    conv = WitTranscriptionConverter()
    out_stim = conv.transform(stim)
    assert type(out_stim) == ComplexTextStim
    first_word = next(w for w in out_stim)
    assert type(first_word) == TextStim
    assert first_word.onset == 4.2
    second_word = [w for w in out_stim][1]
    assert second_word.onset == 4.2
    text = [elem.text for elem in out_stim]
    assert 'thermodynamics' in text or 'obey' in text
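Example 2 adds onset checks: Wit.ai's transcription response carries no per-word timestamps, so each transcribed TextStim appears to inherit the source AudioStim's onset unchanged. A compact restatement of the two word-level assertions above, assuming the onset is applied uniformly to every word:

    stim = AudioStim(join(AUDIO_DIR, 'homer.wav'), onset=4.2)
    words = list(WitTranscriptionConverter().transform(stim))
    assert all(w.onset == 4.2 for w in words)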
Example 3
def test_big_pipeline():
    pytest.importorskip('pygraphviz')
    filename = join(get_test_data_path(), 'video', 'obama_speech.mp4')
    video = VideoStim(filename)
    visual_nodes = [(FrameSamplingFilter(every=15), [
        (TesseractConverter(), [LengthExtractor()]),
        VibranceExtractor(),
        'BrightnessExtractor',
    ])]
    audio_nodes = [(VideoToAudioConverter(),
                    [WitTranscriptionConverter(),
                     'LengthExtractor'], 'video_to_audio')]
    graph = Graph()
    graph.add_nodes(visual_nodes)
    graph.add_nodes(audio_nodes)
    results = graph.run(video, merge=False)
    result = merge_results(results, format='wide', extractor_names='multi')
    # Test that pygraphviz outputs a file
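    # NB: _get_candidate_names() is a private tempfile helper that yields bare
    # filenames, so graph.draw() writes into the current working directory.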
    drawfile = next(tempfile._get_candidate_names())
    graph.draw(drawfile)
    assert exists(drawfile)
    os.remove(drawfile)
    assert ('LengthExtractor', 'text_length') in result.columns
    assert ('VibranceExtractor', 'vibrance') in result.columns
    # assert not result[('onset', '')].isnull().any()
    assert 'text[negotiations]' in result['stim_name'].values
    assert 'frame[90]' in result['stim_name'].values
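Both pipeline examples lean on pliers' node spec: each node is a (transformer, children) or (transformer, children, name) tuple, children may themselves be nodes, and a bare string such as 'BrightnessExtractor' is resolved to a transformer by class name. A minimal sketch of just the audio branch, assuming Graph also accepts the node list at construction:

    video = VideoStim(join(get_test_data_path(), 'video', 'obama_speech.mp4'))
    audio_branch = [(VideoToAudioConverter(),                # root transformer
                     [WitTranscriptionConverter(),
                      'LengthExtractor'],                    # children
                     'video_to_audio')]                      # optional node name
    result = Graph(nodes=audio_branch).run(video)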
Example 4
def test_WitTranscriptionConverter():
    stim = AudioStim(join(AUDIO_DIR, 'obama_speech.wav'), onset=4.2)
    conv = WitTranscriptionConverter()
    assert conv.validate_keys()
    out_stim = conv.transform(stim)
    assert type(out_stim) == ComplexTextStim
    first_word = next(w for w in out_stim)
    assert type(first_word) == TextStim
    assert first_word.onset == 4.2
    second_word = [w for w in out_stim][1]
    assert second_word.onset == 4.2
    text = [elem.text for elem in out_stim]
    assert 'today' in text or 'negotiations' in text

    conv = WitTranscriptionConverter(api_key='nogood')
    assert not conv.validate_keys()
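validate_keys() lets tests bail out cleanly when credentials are missing or wrong; as the 'nogood' case shows, it evidently rejects invalid tokens, presumably by querying the API rather than just checking that a key is set. WitTranscriptionConverter reads its token from the WIT_AI_API_KEY environment variable unless api_key is passed explicitly. A minimal setup sketch, with a placeholder token:

    import os

    os.environ['WIT_AI_API_KEY'] = 'my-token'  # placeholder; use a real Wit.ai token
    conv = WitTranscriptionConverter()  # or WitTranscriptionConverter(api_key='my-token')
    if not conv.validate_keys():
        pytest.skip('no valid Wit.ai credentials')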
Example 5
def test_stim_history_tracking():
    video = VideoStim(join(get_test_data_path(), 'video', 'obama_speech.mp4'))
    assert video.history is None
    conv = VideoToAudioConverter()
    stim = conv.transform(video)
    assert str(stim.history) == 'VideoStim->VideoToAudioConverter/AudioStim'
    conv = WitTranscriptionConverter()
    stim = conv.transform(stim)
    assert str(
        stim.history
    ) == 'VideoStim->VideoToAudioConverter/AudioStim->WitTranscriptionConverter/ComplexTextStim'
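Each transformation appends a SourceStim->Transformer/ResultStim hop to the stim's history, so the full provenance chain is recoverable from any derived stim. A small hypothetical helper that splits the string format asserted above back into hops:

    def history_hops(history):
        """Split 'A->T1/B->T2/C' into hops [('A', 'T1', 'B'), ('B', 'T2', 'C')]."""
        parts = str(history).split('->')   # e.g. ['A', 'T1/B', 'T2/C']
        hops, source = [], parts[0]
        for step in parts[1:]:
            transformer, result = step.split('/')
            hops.append((source, transformer, result))
            source = result
        return hops

    assert history_hops('VideoStim->VideoToAudioConverter/AudioStim'
                        '->WitTranscriptionConverter/ComplexTextStim') == \
        [('VideoStim', 'VideoToAudioConverter', 'AudioStim'),
         ('AudioStim', 'WitTranscriptionConverter', 'ComplexTextStim')]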
Example 6
def test_big_pipeline():
    filename = join(get_test_data_path(), 'video', 'obama_speech.mp4')
    video = VideoStim(filename)
    visual_nodes = [(FrameSamplingConverter(every=15), [
        (TesseractConverter(), [LengthExtractor()]),
        VibranceExtractor(),
        'BrightnessExtractor',
    ])]
    audio_nodes = [(VideoToAudioConverter(),
                    [WitTranscriptionConverter(), 'LengthExtractor'],
                    'video_to_audio')]
    graph = Graph()
    graph.add_nodes(visual_nodes)
    graph.add_nodes(audio_nodes)
    result = graph.run(video)
    assert ('LengthExtractor', 'text_length') in result.columns
    assert ('VibranceExtractor', 'vibrance') in result.columns
    # assert not result[('onset', '')].isnull().any()
    assert 'text[negotiations]' in result['stim'].values
    assert 'frame[90]' in result['stim'].values
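This last pipeline is the same scenario as Example 3 written against an older pliers release: frame sampling was still a converter (FrameSamplingConverter, later reworked into pliers.filters.FrameSamplingFilter), graph.run() merged results itself, and the merged frame exposed a 'stim' column where newer versions use 'stim_name'. A version-tolerant import, assuming those two module layouts:

    try:
        # newer releases: frame sampling is a filter
        from pliers.filters import FrameSamplingFilter as FrameSampler
    except ImportError:
        # older releases: frame sampling was a converter
        from pliers.converters import FrameSamplingConverter as FrameSampler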