Example #1
def test_google_vision_face_batch():
    obama_file = join(get_test_data_path(), 'image', 'obama.jpg')
    people_file = join(get_test_data_path(), 'image', 'thai_people.jpg')
    stims = [ImageStim(obama_file), ImageStim(people_file)]
    ext = GoogleVisionAPIFaceExtractor(handle_annotations='first')
    result = ext.transform(stims)
    result = ExtractorResult.merge_stims(result)
    assert 'face1_joyLikelihood' in result.columns
    assert result['face1_joyLikelihood'][0] == 'VERY_LIKELY'
    assert result['face1_joyLikelihood'][1] == 'VERY_LIKELY'

    video = VideoStim(join(get_test_data_path(), 'video', 'obama_speech.mp4'))
    conv = FrameSamplingFilter(every=10)
    video = conv.transform(video)
    result = ext.transform(video)
    result = ExtractorResult.merge_stims(result)
    assert 'face1_joyLikelihood' in result.columns
    assert result.shape == (11, 137)

    video = VideoStim(join(get_test_data_path(), 'video', 'small.mp4'))
    video = conv.transform(video)
    result = ext.transform(video)
    result = ExtractorResult.merge_stims(result)
    assert 'face1_joyLikelihood' not in result.columns
    assert result.shape == (17, 7)
Example #2
def test_batch_transformer():
    img1 = ImageStim(join(get_test_data_path(), 'image', 'apple.jpg'))
    img2 = ImageStim(join(get_test_data_path(), 'image', 'button.jpg'))
    img3 = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    ext = DummyBatchExtractor()
    res = ExtractorResult.merge_stims(ext.transform([img1, img2, img3]))
    assert ext.num_calls == 1
    assert res.shape == (3, 8)
    ext = DummyBatchExtractor(batch_size=1)
    res2 = ExtractorResult.merge_stims(ext.transform([img1, img2, img3]))
    assert ext.num_calls == 3
    assert res.equals(res2)
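
DummyBatchExtractor is a pliers test helper that is not shown here; what the assertions above establish is that shrinking batch_size from the default down to 1 triples the number of underlying calls while the merged output stays identical. The standalone sketch below illustrates that batching behaviour generically; it is not pliers code, and run_in_batches is a made-up helper name.

# Generic sketch of the batching behaviour the test checks: a smaller batch
# size means more underlying calls, but the combined output is the same.
# Standalone illustration only; run_in_batches is a hypothetical helper.
def run_in_batches(items, batch_size, process_batch):
    results, num_calls = [], 0
    for start in range(0, len(items), batch_size):
        num_calls += 1
        results.extend(process_batch(items[start:start + batch_size]))
    return results, num_calls


files = ['apple.jpg', 'button.jpg', 'obama.jpg']
out_all, calls_all = run_in_batches(files, 3, lambda batch: [f.upper() for f in batch])
out_one, calls_one = run_in_batches(files, 1, lambda batch: [f.upper() for f in batch])
assert calls_all == 1 and calls_one == 3
assert out_all == out_one
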
Example #3
def test_google_vision_api_face_extractor_inits():
    ext = GoogleVisionAPIFaceExtractor(num_retries=5)
    assert ext.num_retries == 5
    assert ext.max_results == 100
    assert ext.service is not None

    # Test parsing of individual response
    filename = join(
        get_test_data_path(), 'payloads', 'google_vision_api_face_payload.json')
    with open(filename, 'r') as f:
        response = json.load(f)
    stim = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    res = ExtractorResult(response['faceAnnotations'], stim, ext)
    df = res.to_df()
    assert df['angerLikelihood'][0] == 'VERY_UNLIKELY'
    assert df['landmark_LEFT_EYE_BOTTOM_BOUNDARY_y'][0] == 257.023
    assert np.isnan(df['boundingPoly_vertex2_y'][0])
Example #4
def _extract(self, stim):
    # Emit one 'constant' feature value per second of the stim, each with an
    # onset at the start of that second and a duration of 1 s.
    time_bins = np.arange(0., stim.duration, 1.)
    return ExtractorResult(np.array([1] * len(time_bins)),
                           stim,
                           self,
                           features=['constant'],
                           onsets=time_bins,
                           durations=[1.] * len(time_bins))
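
The onset/duration bookkeeping in this _extract can be checked in isolation: np.arange stops before stim.duration, so a stim of, say, 3.5 seconds (a made-up value for illustration) gets bins at 0, 1, 2 and 3 seconds, each paired with one feature value and a nominal 1-second duration.

# Standalone check of the time-bin arithmetic above; the 3.5 s duration is a
# made-up value for illustration, not taken from any test stimulus.
import numpy as np

duration = 3.5
time_bins = np.arange(0., duration, 1.)
print(time_bins)                 # [0. 1. 2. 3.] -> one onset per started second
print([1] * len(time_bins))      # [1, 1, 1, 1]  -> the 'constant' feature values
print([1.] * len(time_bins))     # [1.0, 1.0, 1.0, 1.0] -> durations passed along
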
Example #5
def test_convert_to_long():
    audio_dir = join(get_test_data_path(), 'audio')
    stim = AudioStim(join(audio_dir, 'barber.wav'))
    ext = STFTAudioExtractor(frame_size=1., spectrogram=False,
                             freq_bins=[(100, 300), (300, 3000),
                                        (3000, 20000)])
    timeline = ext.transform(stim)
    long_timeline = to_long_format(timeline)
    assert long_timeline.shape == (timeline.to_df().shape[0] * 3, 4)
    assert 'feature' in long_timeline.columns
    assert 'value' in long_timeline.columns
    assert '100_300' not in long_timeline.columns
    timeline = ExtractorResult.merge_features([timeline])
    long_timeline = to_long_format(timeline)
    assert 'feature' in long_timeline.columns
    assert 'extractor' in long_timeline.columns
    assert '100_300' not in long_timeline.columns
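
to_long_format reshapes the wide result (one column per frequency bin, e.g. '100_300') into one row per event/feature pair, which is what the shape and column assertions above check. Below is a rough pandas equivalent of that reshape; the numbers are made up, and the assumption that onset and duration are the id columns kept in the long output is inferred from the 4-column shape assertion, not from the pliers source.

# Rough pandas equivalent of the wide -> long reshape checked above; column
# names mirror the freq_bins, the values are made up for illustration.
import pandas as pd

wide = pd.DataFrame({
    'onset': [0., 1.],
    'duration': [1., 1.],
    '100_300': [0.50, 0.70],
    '300_3000': [0.20, 0.10],
    '3000_20000': [0.01, 0.02],
})
long = wide.melt(id_vars=['onset', 'duration'],
                 var_name='feature', value_name='value')
print(long.shape)   # (6, 4): 2 events x 3 bins; columns onset/duration/feature/value
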
Example #6
def test_extractor_result_to_series_converter():
    data = [[2, 4], [1, 7], [6, 6], [8, 2]]
    result = ExtractorResult(data,
                             None,
                             None,
                             features=['a', 'b'],
                             onsets=[2, 4, 6, 8])
    stims = ExtractorResultToSeriesConverter().transform(result)
    assert len(stims) == 4
    stim = stims[2]
    assert isinstance(stim, SeriesStim)
    assert stim.data.shape == (2, )
    assert list(stim.data) == [6, 6]
    assert stim.onset == 6
    assert stim.duration is None
    assert stim.order == 2
Example #7
def _extract(self, stim, name=None, n_rows=100, n_cols=3, max_time=1000):
    # Random integer data with unique, randomly chosen onsets; optionally
    # renames the extractor and attaches a copy of it to the result.
    data = np.random.randint(0, 1000, (n_rows, n_cols))
    onsets = np.random.choice(n_rows * 2, n_rows, replace=False)
    if name is not None:
        self.name = name
    return ExtractorResult(data, stim, deepcopy(self), onsets=onsets)
Example #8
def _extract(self, stim):
    # Return a single 'constant' value with no explicit onsets or durations.
    return ExtractorResult(np.array([[1]]),
                           stim,
                           self,
                           features=['constant'])