Example No. 1
0
def test_convert_to_long():
    """Wide-to-long conversion of an STFT extractor result.

    The long format should expand each wide row into one row per
    frequency bin, holding bin labels in a 'feature' column (and values
    in 'value') rather than as individual columns. After merging into a
    timeline, an 'extractor' column must also be present.
    """
    wav_path = join(get_test_data_path(), 'audio', 'barber.wav')
    stim = AudioStim(wav_path)
    extractor = STFTAudioExtractor(frame_size=1., spectrogram=False,
                                   freq_bins=[(100, 300), (300, 3000),
                                              (3000, 20000)])
    result = extractor.transform(stim)
    long_df = to_long_format(result)
    # Three freq bins -> three long rows for every wide row; 4 cols total.
    n_wide_rows = result.to_df().shape[0]
    assert long_df.shape == (n_wide_rows * 3, 4)
    assert 'feature' in long_df.columns
    assert 'value' in long_df.columns
    # Bin labels live inside the 'feature' column, not as columns.
    assert '100_300' not in long_df.columns

    merged = ExtractorResult.merge_features([result])
    long_df = to_long_format(merged)
    assert 'feature' in long_df.columns
    assert 'extractor' in long_df.columns
    assert '100_300' not in long_df.columns
Example No. 2
0
def test_convert_to_long_graph():
    """Long-format conversion of a Graph run keeps provenance columns.

    Results produced via a Graph should, once converted to long format,
    expose 'feature', 'extractor', and 'history' columns.
    """
    img_path = join(get_test_data_path(), 'image', 'obama.jpg')
    graph = Graph([GoogleVisionAPIFaceExtractor()])
    result = graph.run(ImageStim(img_path))
    long_df = to_long_format(result)
    for expected_col in ('feature', 'extractor', 'history'):
        assert expected_col in long_df.columns