Example #1
def test_validation_levels(caplog):
    cache_default = config.get_option('cache_transformers')
    config.set_option('cache_transformers', False)

    ext = BrightnessExtractor()
    stim = TextStim(text='hello world')
    with pytest.raises(TypeError):
        ext.transform(stim)
    res = ext.transform(stim, validation='warn')

    log_message = caplog.records[0].message
    assert log_message == ("Transformers of type BrightnessExtractor can "
                  "only be applied to stimuli of type(s) <class 'pliers"
                  ".stimuli.image.ImageStim'> (not type TextStim), and no "
                  "applicable Converter was found.")
    assert not res

    res = ext.transform(stim, validation='loose')
    assert not res
    stim2 = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
    res = ext.transform([stim, stim2], validation='loose')
    assert len(res) == 1
    assert np.isclose(res[0].to_df()['brightness'][0], 0.88784294, 1e-5)

    config.set_option('cache_transformers', cache_default)
Example #2
def test_validation_levels(caplog):
    cache_default = config.get_option('cache_transformers')
    config.set_option('cache_transformers', False)

    ext = BrightnessExtractor()
    stim = TextStim(text='hello world')
    with pytest.raises(TypeError):
        ext.transform(stim)
    res = ext.transform(stim, validation='warn')

    log_message = caplog.records[0].message
    assert log_message == (
        "Transformers of type BrightnessExtractor can "
        "only be applied to stimuli of type(s) <class 'pliers"
        ".stimuli.image.ImageStim'> (not type TextStim), and no "
        "applicable Converter was found.")
    assert not res

    res = ext.transform(stim, validation='loose')
    assert not res
    stim2 = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
    res = ext.transform([stim, stim2], validation='loose')
    assert len(res) == 1
    assert np.isclose(res[0].to_df()['brightness'][0], 0.88784294, 1e-5)

    config.set_option('cache_transformers', cache_default)
Example #3
def test_validation_levels(capsys):
    ext = BrightnessExtractor()
    stim = TextStim(text='hello world')
    with pytest.raises(TypeError):
        ext.transform(stim)
    res = ext.transform(stim, validation='warn')
    assert not res
    out, err = capsys.readouterr()
    assert err == "WARNING:root:Transformers of type BrightnessExtractor can "\
                  "only be applied to stimuli  of type(s) <class 'pliers"\
                  ".stimuli.image.ImageStim'> (not type TextStim), and no "\
                  "applicable Converter was found.\n"
    res = ext.transform(stim, validation='loose')
    assert not res
    stim2 = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
    res = ext.transform([stim, stim2], validation='loose')
    assert len(res) == 1
    assert np.isclose(res[0].to_df()['brightness'][0], 0.88784294, 1e-5)
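Taken together, the validation tests above exercise all three levels: the default raises a TypeError when a transformer receives an incompatible stim, validation='warn' logs the error message and returns nothing, and validation='loose' skips incompatible stims silently, which is why transforming a [TextStim, ImageStim] pair yields exactly one result.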
Example #4
def test_transformations_on_compound_stim():
    image1 = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
    image2 = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    text = ComplexTextStim(text="The quick brown fox jumped...")
    stim = CompoundStim([image1, image2, text])

    ext = BrightnessExtractor()
    results = ext.transform(stim)
    assert len(results) == 2
    assert np.allclose(results[0].data[0], 0.88784294)
Example #5
def test_transformations_on_compound_stim():
    image1 = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
    image2 = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    text = ComplexTextStim(text="The quick brown fox jumped...")
    stim = CompoundStim([image1, image2, text])

    ext = BrightnessExtractor()
    results = ext.transform(stim)
    assert len(results) == 2
    assert np.allclose(results[0]._data[0], 0.88784294)
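This test appears in two variants that differ only in the attribute used to read the result (data vs. the private _data), presumably reflecting different pliers releases; in both, the extractor applied to a CompoundStim returns one result per compatible element, so the two ImageStims yield two results and the ComplexTextStim is skipped.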
Example #6
def test_parallelization():
    # TODO: test that parallelization actually happened (this will likely
    # require some new logging functionality, or introspection). For now we
    # just make sure the parallelized version produces the same result.
    default = config.get_option('parallelize')

    filename = join(get_test_data_path(), 'video', 'small.mp4')
    video = VideoStim(filename)
    ext = BrightnessExtractor()

    # With parallelization
    config.set_option('parallelize', True)
    result1 = ext.transform(video)

    # Without parallelization
    config.set_option('parallelize', False)
    result2 = ext.transform(video)

    assert result1 == result2
    config.set_option('parallelize', default)
Example #7
def test_parallelization():
    # TODO: test that parallelization actually happened (this will likely
    # require some new logging functionality, or introspection). For now we
    # just make sure the parallelized version produces the same result.
    default = config.get_option('parallelize')
    cache_default = config.get_option('cache_transformers')
    config.set_option('cache_transformers', True)

    filename = join(get_test_data_path(), 'video', 'small.mp4')
    video = VideoStim(filename)
    ext = BrightnessExtractor()

    # With parallelization
    config.set_option('parallelize', True)
    result1 = ext.transform(video)

    # Without parallelization
    config.set_option('parallelize', False)
    result2 = ext.transform(video)

    assert result1 == result2
    config.set_option('parallelize', default)
    config.set_option('cache_transformers', cache_default)
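Both parallelization tests above save, override, and restore pliers config options by hand. A small context manager makes that save/set/restore pattern harder to get wrong; the helper below is a hypothetical sketch, not part of pliers, and assumes only the config.get_option/config.set_option API already used in the tests.

from contextlib import contextmanager

from pliers import config

@contextmanager
def config_option(name, value):
    # Temporarily override a pliers config option, restoring the previous
    # value even if the body raises.
    old = config.get_option(name)
    config.set_option(name, value)
    try:
        yield
    finally:
        config.set_option(name, old)

# Usage sketch:
# with config_option('parallelize', True):
#     result1 = ext.transform(video)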
Example #8
def test_transform_with_string_input():
    ext = BrightnessExtractor()
    res = ext.transform(join(get_test_data_path(), 'image', 'apple.jpg'))
    np.testing.assert_almost_equal(res.to_df()['brightness'].values[0],
                                   0.887842942)
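As this test shows, transform() also accepts a bare file path; pliers loads the file as the appropriate Stim type before extraction, so the call returns a single result whose to_df() contains the brightness value.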
Example #9
def extract_visual_features(video_file):
    """
    This function extracts luminance, vibrance, saliency, and sharpness from
    the frames of a video using the pliers library. If you use this function,
    please cite the pliers library directly:
    https://github.com/PsychoinformaticsLab/pliers#how-to-cite

    Parameters
    ----------
    video_file: str
        Path to video file to analyze.

    Returns
    -------
    low_level_video_df: DataFrame
        Pandas dataframe with a column per low-level feature (index is time).
    """

    import pandas as pd

    # extract video luminance
    print('Extracting brightness...')
    from pliers.extractors import BrightnessExtractor
    brightext = BrightnessExtractor()
    brightres = brightext.transform(video_file)
    # DataFrame.append was removed in pandas 2.0; build the frame with
    # pd.concat instead, tagging each result with its frame order
    brightres_df = pd.concat(
        [ob.to_df().assign(order=a) for a, ob in enumerate(brightres)],
        ignore_index=True)

    # extract saliency
    print('Extracting saliency...')
    from pliers.extractors import SaliencyExtractor
    salext = SaliencyExtractor()
    salres = salext.transform(video_file)
    salres_df = pd.concat(
        [ob.to_df().assign(order=a) for a, ob in enumerate(salres)],
        ignore_index=True)

    # extract sharpness
    print('Extracting sharpness...')
    from pliers.extractors import SharpnessExtractor
    sharpext = SharpnessExtractor()
    sharpres = sharpext.transform(video_file)
    sharpres_df = pd.concat(
        [ob.to_df().assign(order=a) for a, ob in enumerate(sharpres)],
        ignore_index=True)

    # extract vibrance
    print('Extracting vibrance...')
    from pliers.extractors import VibranceExtractor
    vibext = VibranceExtractor()
    vibres = vibext.transform(video_file)
    vibres_df = pd.concat(
        [ob.to_df().assign(order=a) for a, ob in enumerate(vibres)],
        ignore_index=True)

    # combine into 1 dataframe
    print('Combining data...')
    low_level_video_df = brightres_df.merge(salres_df[salres_df.columns[4:]],
                                            left_index=True,
                                            right_index=True)
    low_level_video_df = low_level_video_df.merge(
        sharpres_df[sharpres_df.columns[4:]],
        left_index=True,
        right_index=True)
    low_level_video_df = low_level_video_df.merge(
        vibres_df[vibres_df.columns[4:]], left_index=True, right_index=True)
    low_level_video_df['onset_ms'] = low_level_video_df['onset'] * 1000
    low_level_video_df.index = pd.to_datetime(low_level_video_df['onset_ms'],
                                              unit='ms')
    low_level_video_df = low_level_video_df.drop(
        ['max_saliency', 'max_y', 'max_x', 'onset', 'object_id', 'order'],
        axis=1)
    low_level_video_df.index.name = None
    print('Visual feature extraction complete.')
    return low_level_video_df
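A minimal usage sketch for the function above; the file names are placeholders, not part of the original source.

features = extract_visual_features('my_video.mp4')  # placeholder path
print(features.columns.tolist())  # one column per low-level visual feature
features.to_csv('visual_features.csv')  # placeholder output file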
Example #10
def test_transform_with_string_input():
    ext = BrightnessExtractor()
    res = ext.transform(join(get_test_data_path(), 'image', 'apple.jpg'))
    np.testing.assert_almost_equal(res.to_df()['brightness'].values[0],
                                   0.887842942)