Example #1
def test_tesseract_converter():
    pytest.importorskip('pytesseract')
    stim = ImageStim(join(IMAGE_DIR, 'button.jpg'), onset=4.2)
    conv = TesseractConverter()
    out_stim = conv.transform(stim)
    assert out_stim.name == 'text[Exit]'
    assert out_stim.history.source_class == 'ImageStim'
    assert out_stim.history.source_name == 'button.jpg'
    assert out_stim.onset == 4.2
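This snippet assumes module-level setup that isn't shown: the pytest/pliers imports and the IMAGE_DIR constant. A minimal sketch of what that header would look like, assuming the standard pliers package layout and its bundled test helper get_test_data_path (treat the exact import paths as assumptions):

from os.path import join

import pytest

from pliers.stimuli import ImageStim
from pliers.converters import TesseractConverter
from pliers.tests.utils import get_test_data_path  # helper shipped with pliers' test suite

# Assumed definition of the IMAGE_DIR constant used above
IMAGE_DIR = join(get_test_data_path(), 'image')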
Example #2
def test_tesseract_converter():
    pytest.importorskip('pytesseract')
    image_dir = join(get_test_data_path(), 'image')
    stim = ImageStim(join(image_dir, 'button.jpg'))
    conv = TesseractConverter()
    out_stim = conv.transform(stim)
    assert out_stim.name == 'text[Exit]'
    assert out_stim.history.source_class == 'ImageStim'
    assert out_stim.history.source_name == 'button.jpg'
Example #3
def test_big_pipeline():
    pytest.importorskip('pygraphviz')
    filename = join(get_test_data_path(), 'video', 'obama_speech.mp4')
    video = VideoStim(filename)
    visual_nodes = [(FrameSamplingFilter(every=15), [
        (TesseractConverter(), [LengthExtractor()]),
        VibranceExtractor(),
        'BrightnessExtractor',
    ])]
    audio_nodes = [(VideoToAudioConverter(),
                    [WitTranscriptionConverter(),
                     'LengthExtractor'], 'video_to_audio')]
    graph = Graph()
    graph.add_nodes(visual_nodes)
    graph.add_nodes(audio_nodes)
    results = graph.run(video, merge=False)
    result = merge_results(results, format='wide', extractor_names='multi')
    # Test that Graph.draw() writes an image file via pygraphviz.
    # tempfile._get_candidate_names() is a private helper; here it just supplies
    # an unused temporary file name for the drawing.
    drawfile = next(tempfile._get_candidate_names())
    graph.draw(drawfile)
    assert exists(drawfile)
    os.remove(drawfile)
    assert ('LengthExtractor', 'text_length') in result.columns
    assert ('VibranceExtractor', 'vibrance') in result.columns
    # assert not result[('onset', '')].isnull().any()
    assert 'text[negotiations]' in result['stim_name'].values
    assert 'frame[90]' in result['stim_name'].values
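Like the earlier examples, this pipeline test leans on module-level imports that aren't reproduced here. A sketch of the header it appears to assume (import paths follow the public pliers API; string node specs such as 'BrightnessExtractor' are resolved to transformer classes by name at run time, so they need no import):

import os
import tempfile
from os.path import join, exists

import pytest

from pliers.stimuli import VideoStim
from pliers.filters import FrameSamplingFilter
from pliers.converters import (TesseractConverter, VideoToAudioConverter,
                               WitTranscriptionConverter)
from pliers.extractors import LengthExtractor, VibranceExtractor, merge_results
from pliers.graph import Graph
from pliers.tests.utils import get_test_data_path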
Example #4
def test_small_pipeline():
    pytest.importorskip('pytesseract')
    filename = join(get_test_data_path(), 'image', 'button.jpg')
    stim = ImageStim(filename)
    nodes = [(TesseractConverter(), [LengthExtractor()])]
    graph = Graph(nodes)
    result = list(graph.run([stim], merge=False))
    history = result[0].history.to_df()
    assert history.shape == (2, 8)
    assert history.iloc[0]['result_class'] == 'TextStim'
    result = merge_results(result, format='wide', extractor_names='prepend')
    assert (0, 'text[Exit]') in result['stim_name'].values
    assert 'LengthExtractor#text_length' in result.columns
    assert result['LengthExtractor#text_length'].values[0] == 4
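Examples #3 and #4 together show the two merge_results naming schemes exercised in these tests: extractor_names='multi' yields MultiIndex columns such as ('LengthExtractor', 'text_length'), whereas extractor_names='prepend' yields flat column names such as 'LengthExtractor#text_length'. Both behaviors are visible directly in the assertions above.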
Example #5
def test_big_pipeline():
    filename = join(get_test_data_path(), 'video', 'obama_speech.mp4')
    video = VideoStim(filename)
    visual_nodes = [(FrameSamplingConverter(every=15), [
        (TesseractConverter(), [LengthExtractor()]),
        VibranceExtractor(),
        'BrightnessExtractor',
    ])]
    audio_nodes = [(VideoToAudioConverter(),
                    [WitTranscriptionConverter(),
                     'LengthExtractor'], 'video_to_audio')]
    graph = Graph()
    graph.add_nodes(visual_nodes)
    graph.add_nodes(audio_nodes)
    result = graph.run(video)
    assert ('LengthExtractor', 'text_length') in result.columns
    assert ('VibranceExtractor', 'vibrance') in result.columns
    # assert not result[('onset', '')].isnull().any()
    assert 'text[negotiations]' in result['stim'].values
    assert 'frame[90]' in result['stim'].values
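Note that Example #5 appears to target an older pliers API than Example #3: it builds the sampling node with FrameSamplingConverter rather than FrameSamplingFilter, relies on graph.run(video) returning an already-merged result instead of calling merge_results, and reads stimulus names from a 'stim' column rather than 'stim_name'. The pipelines are otherwise the same.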