Example #1
def test_big_pipeline():
    pytest.importorskip('pygraphviz')
    filename = join(get_test_data_path(), 'video', 'obama_speech.mp4')
    video = VideoStim(filename)
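    # Visual branch: sample every 15th frame, then extract OCR text length, vibrance, and brightness from each frame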
    visual_nodes = [(FrameSamplingFilter(every=15), [
        (TesseractConverter(), [LengthExtractor()]),
        VibranceExtractor(),
        'BrightnessExtractor',
    ])]
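    # Audio branch: extract the audio track, transcribe it with Wit.ai, and compute transcript length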
    audio_nodes = [(VideoToAudioConverter(),
                    [WitTranscriptionConverter(),
                     'LengthExtractor'], 'video_to_audio')]
    graph = Graph()
    graph.add_nodes(visual_nodes)
    graph.add_nodes(audio_nodes)
    results = graph.run(video, merge=False)
    result = merge_results(results, format='wide', extractor_names='multi')
    # Test that pygraphviz outputs a file
    drawfile = next(tempfile._get_candidate_names())
    graph.draw(drawfile)
    assert exists(drawfile)
    os.remove(drawfile)
    assert ('LengthExtractor', 'text_length') in result.columns
    assert ('VibranceExtractor', 'vibrance') in result.columns
    # assert not result[('onset', '')].isnull().any()
    assert 'text[negotiations]' in result['stim_name'].values
    assert 'frame[90]' in result['stim_name'].values
Example #2
def test_vibrance_extractor():
    stim = ImageStim(join(IMAGE_DIR, 'apple.jpg'), onset=4.2, duration=1)
    result = VibranceExtractor().transform(stim).to_df()
    color = result['vibrance'][0]
    assert np.isclose(color, 1370.65482988, 1e-5)
    assert result['onset'][0] == 4.2
    assert result['duration'][0] == 1
Example #3
def test_vibrance_extractor():
    image_dir = join(get_test_data_path(), 'image')
    stim = ImageStim(join(image_dir, 'apple.jpg'), onset=4.2, duration=1)
    result = VibranceExtractor().transform(stim).to_df()
    color = result['vibrance'][0]
    assert np.isclose(color, 1370.65482988, 1e-5)
    assert result['onset'][0] == 4.2
    assert result['duration'][0] == 1
Example #4
def test_small_pipeline2():
    filename = join(get_test_data_path(), 'image', 'button.jpg')
    nodes = [BrightnessExtractor(), VibranceExtractor()]
    graph = Graph(nodes)
    result = list(graph.run([filename], merge=False))
    history = result[0].history.to_df()
    assert history.shape == (1, 8)
    result = merge_results(result, format='wide', extractor_names='multi')
    assert ('BrightnessExtractor', 'brightness') in result.columns
    brightness = result[('BrightnessExtractor', 'brightness')].values[0]
    vibrance = result[('VibranceExtractor', 'vibrance')].values[0]
    assert_almost_equal(brightness, 0.746965, 5)
    assert ('VibranceExtractor', 'vibrance') in result.columns
    assert_almost_equal(vibrance, 841.577274, 5)
Example #5
def test_merge_extractor_results_by_features():
    np.random.seed(100)
    image_dir = join(get_test_data_path(), 'image')
    stim = ImageStim(join(image_dir, 'apple.jpg'))

    # Merge results for static Stims (no onsets)
    extractors = [BrightnessExtractor(), VibranceExtractor()]
    results = [e.transform(stim) for e in extractors]
    df = ExtractorResult.merge_features(results)

    de = DummyExtractor()
    de_names = ['Extractor1', 'Extractor2', 'Extractor3']
    results = [de.transform(stim, name) for name in de_names]
    df = ExtractorResult.merge_features(results)
    assert df.shape == (177, 14)
    assert df.columns.levels[1].unique().tolist() == ['duration', 0, 1, 2, '']
    cols = ['onset', 'class', 'filename', 'history', 'stim']
    assert df.columns.levels[0].unique().tolist() == de_names + cols
Example #6
def test_big_pipeline():
    filename = join(get_test_data_path(), 'video', 'obama_speech.mp4')
    video = VideoStim(filename)
    visual_nodes = [(FrameSamplingConverter(every=15), [
                        (TesseractConverter(), [LengthExtractor()]),
                        VibranceExtractor(), 'BrightnessExtractor',
                    ])]
    audio_nodes = [(VideoToAudioConverter(), [
                        WitTranscriptionConverter(), 'LengthExtractor'],
                        'video_to_audio')]
    graph = Graph()
    graph.add_nodes(visual_nodes)
    graph.add_nodes(audio_nodes)
    result = graph.run(video)
    assert ('LengthExtractor', 'text_length') in result.columns
    assert ('VibranceExtractor', 'vibrance') in result.columns
    # assert not result[('onset', '')].isnull().any()
    assert 'text[negotiations]' in result['stim'].values
    assert 'frame[90]' in result['stim'].values
Example #7
def extract_visual_features(video_file):
    """
    This function extracts luminance, vibrance, saliency, and sharpness from the frames of a video
    using the pliers library. If you use this function, please cite the pliers library directly:
    https://github.com/PsychoinformaticsLab/pliers#how-to-cite

    Parameters
    ----------
    video_file: str
        Path to video file to analyze.

    Returns
    -------
    low_level_video_df: DataFrame
        Pandas dataframe with a column per low-level feature (index is time).
    """

    import pandas as pd

    # extract video luminance
    print('Extracting brightness...')
    from pliers.extractors import BrightnessExtractor
    brightext = BrightnessExtractor()
    brightres = brightext.transform(video_file)
    # collect per-frame brightness results into one dataframe
    brightres_df = pd.concat(
        [ob.to_df().assign(order=a) for a, ob in enumerate(brightres)],
        ignore_index=True)

    # extract saliency
    print('Extracting saliency...')
    from pliers.extractors import SaliencyExtractor
    salext = SaliencyExtractor()
    salres = salext.transform(video_file)
    # collect per-frame saliency results into one dataframe
    salres_df = pd.concat(
        [ob.to_df().assign(order=a) for a, ob in enumerate(salres)],
        ignore_index=True)

    # extract sharpness
    print('Extracting sharpness...')
    from pliers.extractors import SharpnessExtractor
    sharpext = SharpnessExtractor()
    sharpres = sharpext.transform(video_file)
    # collect per-frame sharpness results into one dataframe
    sharpres_df = pd.concat(
        [ob.to_df().assign(order=a) for a, ob in enumerate(sharpres)],
        ignore_index=True)

    # extract vibrance
    print('Extracting vibrance...')
    from pliers.extractors import VibranceExtractor
    vibext = VibranceExtractor()
    vibres = vibext.transform(video_file)
    # collect per-frame vibrance results into one dataframe
    vibres_df = pd.concat(
        [ob.to_df().assign(order=a) for a, ob in enumerate(vibres)],
        ignore_index=True)

    # combine into 1 dataframe
    print('Combining data...')
    low_level_video_df = brightres_df.merge(salres_df[salres_df.columns[4:]],
                                            left_index=True,
                                            right_index=True)
    low_level_video_df = low_level_video_df.merge(
        sharpres_df[sharpres_df.columns[4:]],
        left_index=True,
        right_index=True)
    low_level_video_df = low_level_video_df.merge(
        vibres_df[vibres_df.columns[4:]], left_index=True, right_index=True)
    low_level_video_df['onset_ms'] = low_level_video_df['onset'] * 1000
    low_level_video_df.index = pd.to_datetime(low_level_video_df['onset_ms'],
                                              unit='ms')
    low_level_video_df = low_level_video_df.drop(
        ['max_saliency', 'max_y', 'max_x', 'onset', 'object_id', 'order'],
        axis=1)
    low_level_video_df.index.name = None
    print('Visual feature extraction complete.')
    return low_level_video_df
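
A minimal usage sketch for the function above, assuming pliers and its video dependencies are installed and extract_visual_features is in scope ('speech.mp4' is a hypothetical placeholder path):

# Hypothetical example: replace 'speech.mp4' with a real video file.
low_level_video_df = extract_visual_features('speech.mp4')

# The returned dataframe is time-indexed, with one column per low-level
# visual feature (luminance, saliency, sharpness, vibrance).
print(low_level_video_df.head())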