def test_add_nested_children():
    """Nested (extractor, children, name) tuples become named child Nodes."""
    graph = Graph()
    ext_a, ext_b, ext_c = DummyExtractor(), DummyExtractor(), DummyExtractor()
    # Second root carries one child, which itself carries a named grandchild.
    graph.add_nodes([ext_a, (ext_b, [(ext_c, [], "child's child")], 'child')])
    assert len(graph.roots) == 2
    grandchild = graph.roots[1].children[0]
    assert isinstance(grandchild, Node)
    assert grandchild.name == "child's child"
def test_big_pipeline():
    """End-to-end video pipeline with parallel visual and audio branches.

    Runs frame-sampled visual extractors plus an audio/transcription branch
    over a sample video, merges the per-node results into a wide DataFrame,
    and checks that graph drawing (via pygraphviz) produces an output file.
    """
    pytest.importorskip('pygraphviz')
    filename = join(get_test_data_path(), 'video', 'obama_speech.mp4')
    video = VideoStim(filename)
    visual_nodes = [(FrameSamplingFilter(every=15), [
        (TesseractConverter(), [LengthExtractor()]),
        VibranceExtractor(),
        'BrightnessExtractor',
    ])]
    audio_nodes = [(VideoToAudioConverter(),
                    [WitTranscriptionConverter(), 'LengthExtractor'],
                    'video_to_audio')]
    graph = Graph()
    graph.add_nodes(visual_nodes)
    graph.add_nodes(audio_nodes)
    results = graph.run(video, merge=False)
    result = merge_results(results, format='wide', extractor_names='multi')

    # Test that pygraphviz outputs a file. Draw into a temporary directory
    # (public API) instead of tempfile._get_candidate_names(), which is
    # private and left the file in the CWD if an assertion failed first.
    with tempfile.TemporaryDirectory() as tmpdir:
        drawfile = join(tmpdir, 'graph.png')
        graph.draw(drawfile)
        assert exists(drawfile)

    assert ('LengthExtractor', 'text_length') in result.columns
    assert ('VibranceExtractor', 'vibrance') in result.columns
    # assert not result[('onset', '')].isnull().any()
    assert 'text[negotiations]' in result['stim_name'].values
    assert 'frame[90]' in result['stim_name'].values
def test_stim_results():
    """Vertical filter chains and explicit parent nodes produce expected output."""
    stim = TextStim(text='some, example the text.')

    # A vertical chain applies each filter to the previous filter's output.
    g = Graph()
    g.add_nodes(
        ['PunctuationRemovalFilter', 'TokenRemovalFilter', 'TokenizingFilter'],
        mode='vertical')
    final_stims = g.run(stim, merge=False)
    assert len(final_stims) == 2
    assert final_stims[1].text == 'text'

    # Attaching children to a named node and merging into a DataFrame.
    punc_node = Node('PunctuationRemovalFilter', name='punc')
    g = Graph([punc_node])
    g.add_nodes(['TokenizingFilter', 'LengthExtractor'], parent=punc_node)
    results = g.run(stim)
    assert isinstance(results, pd.DataFrame)
    assert results['LengthExtractor#text_length'][0] == 21

    # Unknown invalid_results values must be rejected.
    with pytest.raises(ValueError):
        g.run(stim, invalid_results='fail')
def test_big_pipeline_converter():
    """End-to-end video pipeline using FrameSamplingConverter and merged run.

    Renamed from ``test_big_pipeline``: the file defines that name twice, so
    one of the two tests was silently shadowed and never collected by pytest.
    This variant uses ``FrameSamplingConverter`` and ``graph.run`` with the
    default merge, asserting on the merged multi-index DataFrame.
    """
    filename = join(get_test_data_path(), 'video', 'obama_speech.mp4')
    video = VideoStim(filename)
    visual_nodes = [(FrameSamplingConverter(every=15), [
        (TesseractConverter(), [LengthExtractor()]),
        VibranceExtractor(),
        'BrightnessExtractor',
    ])]
    audio_nodes = [(VideoToAudioConverter(),
                    [WitTranscriptionConverter(), 'LengthExtractor'],
                    'video_to_audio')]
    graph = Graph()
    graph.add_nodes(visual_nodes)
    graph.add_nodes(audio_nodes)
    result = graph.run(video)
    assert ('LengthExtractor', 'text_length') in result.columns
    assert ('VibranceExtractor', 'vibrance') in result.columns
    # assert not result[('onset', '')].isnull().any()
    assert 'text[negotiations]' in result['stim'].values
    assert 'frame[90]' in result['stim'].values
def test_adding_nodes():
    """add_children and add_chain build the expected topologies and results."""
    # add_children: two independent roots, no nesting.
    graph = Graph()
    graph.add_children(['VibranceExtractor', 'BrightnessExtractor'])
    assert len(graph.roots) == 2
    assert len(graph.nodes) == 2
    assert all(len(root.children) == 0 for root in graph.roots)

    img = ImageStim(join(get_test_data_path(), 'image', 'button.jpg'))
    results = graph.run(img, merge=False)
    assert len(results) == 2
    assert_almost_equal(results[0].to_df()['vibrance'][0], 841.577274, 5)
    assert_almost_equal(results[1].to_df()['brightness'][0], 0.746965, 5)

    # add_chain: a single linear filter -> extractor pipeline.
    graph = Graph()
    graph.add_chain(['PunctuationRemovalFilter', 'LengthExtractor'])
    txt = TextStim(text='the.best.text.')
    results = graph.run(txt, merge=False)
    assert len(results) == 1
    assert results[0].to_df()['text_length'][0] == 11

    # Unknown add_nodes modes must be rejected.
    with pytest.raises(ValueError):
        graph.add_nodes(['LengthExtractor'], mode='invalid')
def test_add_children():
    """Extractor instances passed to add_nodes each become a root Node."""
    graph = Graph()
    extractors = [DummyExtractor(), DummyExtractor(), DummyExtractor()]
    graph.add_nodes(extractors)
    assert len(graph.roots) == 3
    assert all(isinstance(root, Node) for root in graph.roots)