def test_transformations_on_compound_stim():
    image1 = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
    image2 = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    text = ComplexTextStim(text="The quick brown fox jumped...")
    stim = CompoundStim([image1, image2, text])
    ext = BrightnessExtractor()
    results = ext.transform(stim)
    assert len(results) == 2
    assert np.allclose(results[0]._data[0], 0.88784294)


def test_graph_smoke_test():
    filename = join(get_test_data_path(), 'image', 'obama.jpg')
    stim = ImageStim(filename)
    nodes = [(BrightnessExtractor(), [], 'brightness_node')]
    graph = Graph(nodes)
    result = graph.run(stim, format='wide', extractor_names='multi')
    brightness = result[('brightness_node', 'brightness')].values[0]
    assert_almost_equal(brightness, 0.556134, 5)


def test_brightness_extractor():
    image_dir = join(get_test_data_path(), 'image')
    stim = ImageStim(join(image_dir, 'apple.jpg'), onset=4.2, duration=1)
    result = BrightnessExtractor().transform(stim).to_df()
    brightness = result['brightness'][0]
    assert np.isclose(brightness, 0.88784294, 1e-5)
    assert result['onset'][0] == 4.2
    assert result['duration'][0] == 1


def test_validation_levels(caplog):
    cache_default = config.get_option('cache_transformers')
    config.set_option('cache_transformers', False)

    ext = BrightnessExtractor()
    stim = TextStim(text='hello world')
    with pytest.raises(TypeError):
        ext.transform(stim)

    res = ext.transform(stim, validation='warn')
    log_message = caplog.records[0].message
    assert log_message == (
        "Transformers of type BrightnessExtractor can "
        "only be applied to stimuli of type(s) <class 'pliers"
        ".stimuli.image.ImageStim'> (not type TextStim), and no "
        "applicable Converter was found.")
    assert not res

    res = ext.transform(stim, validation='loose')
    assert not res

    stim2 = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
    res = ext.transform([stim, stim2], validation='loose')
    assert len(res) == 1
    assert np.isclose(res[0].to_df()['brightness'][0], 0.88784294, 1e-5)

    config.set_option('cache_transformers', cache_default)


def test_small_pipeline2():
    filename = join(get_test_data_path(), 'image', 'button.jpg')
    nodes = [BrightnessExtractor(), VibranceExtractor()]
    graph = Graph(nodes)
    result = list(graph.run([filename], merge=False))
    history = result[0].history.to_df()
    assert history.shape == (1, 8)

    result = merge_results(result, format='wide', extractor_names='multi')
    assert ('BrightnessExtractor', 'brightness') in result.columns
    assert ('VibranceExtractor', 'vibrance') in result.columns
    brightness = result[('BrightnessExtractor', 'brightness')].values[0]
    vibrance = result[('VibranceExtractor', 'vibrance')].values[0]
    assert_almost_equal(brightness, 0.746965, 5)
    assert_almost_equal(vibrance, 841.577274, 5)


def test_parallelization():
    # TODO: test that parallelization actually happened (this will likely
    # require some new logging functionality, or introspection). For now we
    # just make sure the parallelized version produces the same result.
    default = config.get_option('parallelize')
    cache_default = config.get_option('cache_transformers')
    config.set_option('cache_transformers', True)

    filename = join(get_test_data_path(), 'video', 'small.mp4')
    video = VideoStim(filename)
    ext = BrightnessExtractor()

    # With parallelization
    config.set_option('parallelize', True)
    result1 = ext.transform(video)

    # Without parallelization
    config.set_option('parallelize', False)
    result2 = ext.transform(video)

    assert result1 == result2
    config.set_option('parallelize', default)
    config.set_option('cache_transformers', cache_default)


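# Several tests above save, override, and then restore pliers config options
# by hand; if an assertion fails midway, the override leaks into later tests.
# A minimal exception-safe alternative (a sketch; temporary_option is a
# hypothetical helper, not part of pliers):

from contextlib import contextmanager

from pliers import config


@contextmanager
def temporary_option(name, value):
    # Override a pliers config option for the duration of the with-block,
    # restoring the previous value even if the body raises.
    previous = config.get_option(name)
    config.set_option(name, value)
    try:
        yield
    finally:
        config.set_option(name, previous)

# Usage:
#     with temporary_option('parallelize', True):
#         result1 = ext.transform(video)

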
def test_extractor_scikit():
    pytest.importorskip('sklearn')
    image_dir = join(get_test_data_path(), 'image')
    stim = ImageStim(join(image_dir, 'apple.jpg'))

    ext = BrightnessExtractor()
    trans = PliersTransformer(ext)
    res = trans.fit_transform(stim)
    assert res.shape == (1, 1)
    assert np.isclose(res[0][0], 0.88784294, 1e-5)
    meta = trans.metadata_
    assert np.isnan(meta['onset'][0])

    # Same extractor, specified by name rather than by instance
    trans = PliersTransformer('BrightnessExtractor')
    res = trans.fit_transform(stim)
    assert res.shape == (1, 1)
    assert np.isclose(res[0][0], 0.88784294, 1e-5)
    meta = trans.metadata_
    assert np.isnan(meta['onset'][0])


def test_within_pipeline():
    pytest.importorskip('cv2')
    pytest.importorskip('sklearn')
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import Normalizer

    stim = join(get_test_data_path(), 'image', 'apple.jpg')
    graph = Graph([BrightnessExtractor(), SharpnessExtractor()])
    trans = PliersTransformer(graph)
    normalizer = Normalizer()
    pipeline = Pipeline([('pliers', trans), ('normalizer', normalizer)])
    res = pipeline.fit_transform(stim)
    assert res.shape == (1, 2)
    assert np.isclose(res[0][0], 0.66393, 1e-5)
    assert np.isclose(res[0][1], 0.74780, 1e-5)
    meta = trans.metadata_
    assert 'onset' in meta.columns
    assert meta['class'][0] == 'ImageStim'


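# Because PliersTransformer conforms to the scikit-learn transformer API, the
# same graph can also feed a downstream estimator. A sketch under the same
# imports as the test above; fit_image_classifier, filenames, and labels are
# hypothetical (a list of image paths and matching class labels):

def fit_image_classifier(filenames, labels):
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import Normalizer

    # Hypothetical end-to-end pipeline: pliers features -> scaling -> classifier.
    # Graph, PliersTransformer, and the extractors are assumed imported as in
    # the tests above.
    graph = Graph([BrightnessExtractor(), SharpnessExtractor()])
    pipeline = Pipeline([
        ('pliers', PliersTransformer(graph)),
        ('normalizer', Normalizer()),
        ('clf', LogisticRegression()),
    ])
    pipeline.fit(filenames, labels)
    return pipeline

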
def test_merge_extractor_results_by_features():
    np.random.seed(100)
    image_dir = join(get_test_data_path(), 'image')
    stim = ImageStim(join(image_dir, 'apple.jpg'))

    # Merge results for static Stims (no onsets)
    extractors = [BrightnessExtractor(), VibranceExtractor()]
    results = [e.transform(stim) for e in extractors]
    df = ExtractorResult.merge_features(results)

    de = DummyExtractor()
    de_names = ['Extractor1', 'Extractor2', 'Extractor3']
    results = [de.transform(stim, name) for name in de_names]
    df = ExtractorResult.merge_features(results)
    assert df.shape == (177, 14)
    assert df.columns.levels[1].unique().tolist() == ['duration', 0, 1, 2, '']
    cols = ['onset', 'class', 'filename', 'history', 'stim']
    assert df.columns.levels[0].unique().tolist() == de_names + cols


def test_twitter():
    # Test stim creation
    pytest.importorskip('twitter')
    factory = TweetStimFactory()
    status_id = 821442726461931521
    pliers_tweet = factory.get_status(status_id)
    assert isinstance(pliers_tweet, TweetStim)
    assert isinstance(pliers_tweet, CompoundStim)
    assert len(pliers_tweet.elements) == 1

    status_id = 884392294014746624
    ut_tweet = factory.get_status(status_id)
    assert len(ut_tweet.elements) == 2

    # Test text extraction
    ext = LengthExtractor()
    res = ext.transform(pliers_tweet)[0].to_df()
    assert res['text_length'][0] == 104

    # Test image extraction
    ext = BrightnessExtractor()
    res = ext.transform(ut_tweet)[0].to_df()
    brightness = res['brightness'][0]
    assert np.isclose(brightness, 0.54057, 1e-5)


def test_node_init():
    n = Node(BrightnessExtractor(), 'my_node')
    assert isinstance(n.transformer, BrightnessExtractor)
    assert n.name == 'my_node'
    n = Node('brightnessextractor', 'my_node')
    assert isinstance(n.transformer, BrightnessExtractor)


def test_transform_with_string_input():
    ext = BrightnessExtractor()
    res = ext.transform(join(get_test_data_path(), 'image', 'apple.jpg'))
    np.testing.assert_almost_equal(res.to_df()['brightness'].values[0],
                                   0.887842942)


def test_versioning():
    ext = DummyBatchExtractor()
    assert ext.VERSION == '0.1'
    ext = BrightnessExtractor()
    assert ext.VERSION >= '1.0'


def extract_visual_features(video_file):
    """Extract luminance, vibrance, saliency, and sharpness from the frames
    of a video using the pliers library. If you use this function, please
    cite the pliers library directly:
    https://github.com/PsychoinformaticsLab/pliers#how-to-cite

    Parameters
    ----------
    video_file: str
        Path to video file to analyze.

    Returns
    -------
    low_level_video_df: DataFrame
        Pandas dataframe with a column per low-level feature (index is time).
    """
    def results_to_df(results):
        # Stack the per-frame ExtractorResults into one DataFrame, tagging
        # each frame with its order of appearance. (pd.concat replaces the
        # original DataFrame.append pattern, which was removed in pandas 2.0.)
        frames = []
        for a, ob in enumerate(results):
            t = ob.to_df()
            t['order'] = a
            frames.append(t)
        return pd.concat(frames, ignore_index=True)

    # extract video luminance
    print('Extracting brightness...')
    from pliers.extractors import BrightnessExtractor
    brightres_df = results_to_df(BrightnessExtractor().transform(video_file))

    # extract saliency
    print('Extracting saliency...')
    from pliers.extractors import SaliencyExtractor
    salres_df = results_to_df(SaliencyExtractor().transform(video_file))

    # extract sharpness
    print('Extracting sharpness...')
    from pliers.extractors import SharpnessExtractor
    sharpres_df = results_to_df(SharpnessExtractor().transform(video_file))

    # extract vibrance
    print('Extracting vibrance...')
    from pliers.extractors import VibranceExtractor
    vibres_df = results_to_df(VibranceExtractor().transform(video_file))

    # combine into 1 dataframe, taking the shared onset/duration/object_id
    # columns from the brightness frame only
    print('Combining data...')
    low_level_video_df = brightres_df.merge(
        salres_df[salres_df.columns[4:]],
        left_index=True, right_index=True)
    low_level_video_df = low_level_video_df.merge(
        sharpres_df[sharpres_df.columns[4:]],
        left_index=True, right_index=True)
    low_level_video_df = low_level_video_df.merge(
        vibres_df[vibres_df.columns[4:]],
        left_index=True, right_index=True)

    # index the rows by frame timestamp
    low_level_video_df['onset_ms'] = low_level_video_df['onset'] * 1000
    low_level_video_df.index = pd.to_datetime(low_level_video_df['onset_ms'],
                                              unit='ms')
    low_level_video_df = low_level_video_df.drop(
        ['max_saliency', 'max_y', 'max_x', 'onset', 'object_id', 'order'],
        axis=1)
    low_level_video_df.index.name = None
    print('Visual feature extraction complete.')
    return low_level_video_df
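

# A minimal usage sketch for extract_visual_features; the video path is
# hypothetical, and pandas is assumed imported as pd at module level:

features = extract_visual_features('stimuli/my_movie.mp4')  # hypothetical path
print(features.head())     # one row per frame, indexed by timestamp
print(features.columns)    # one column per extracted low-level feature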