# Example #1
def test_google_vision_face_batch():
    """Batch-transform images and frame-sampled videos through the Google
    Vision face extractor and verify the merged result shapes/columns."""
    img_dir = join(get_test_data_path(), 'image')
    images = [ImageStim(join(img_dir, name))
              for name in ('obama.jpg', 'thai_people.jpg')]
    ext = GoogleVisionAPIFaceExtractor(handle_annotations='first')
    merged = ExtractorResult.merge_stims(ext.transform(images))
    assert 'face1_joyLikelihood' in merged.columns
    # Both images contain at least one clearly smiling face
    for row in (0, 1):
        assert merged['face1_joyLikelihood'][row] == 'VERY_LIKELY'

    sampler = FrameSamplingFilter(every=10)
    speech = sampler.transform(
        VideoStim(join(get_test_data_path(), 'video', 'obama_speech.mp4')))
    merged = ExtractorResult.merge_stims(ext.transform(speech))
    assert 'face1_joyLikelihood' in merged.columns
    assert merged.shape == (11, 137)

    no_face = sampler.transform(
        VideoStim(join(get_test_data_path(), 'video', 'small.mp4')))
    merged = ExtractorResult.merge_stims(ext.transform(no_face))
    # This clip has no faces, so no face columns should appear
    assert 'face1_joyLikelihood' not in merged.columns
    assert merged.shape == (17, 7)
# Example #2
def test_google_vision_face_batch():
    """Batched face extraction via merge_results over images and videos."""
    names = ['apple', 'obama', 'thai_people']
    stims = [ImageStim(join(get_test_data_path(), 'image', '%s.jpg' % n))
             for n in names]
    ext = GoogleVisionAPIFaceExtractor(batch_size=5)
    df = merge_results(ext.transform(stims), format='wide',
                       extractor_names=False, handle_annotations='first')
    # The apple image has no face, so only two rows survive the merge
    assert df.shape == (2, 139)
    assert 'joyLikelihood' in df.columns
    assert df['joyLikelihood'][0] == 'VERY_LIKELY'
    assert df['joyLikelihood'][1] == 'VERY_LIKELY'

    sampler = FrameSamplingFilter(every=10)
    frames = sampler.transform(VideoStim(join(VIDEO_DIR, 'obama_speech.mp4')))
    df = merge_results(ext.transform(frames), format='wide',
                       extractor_names=False)
    assert 'joyLikelihood' in df.columns
    assert df.shape == (22, 139)

    frames = sampler.transform(VideoStim(join(VIDEO_DIR, 'small.mp4')))
    df = merge_results(ext.transform(frames), format='wide',
                       extractor_names=False)
    # No faces detected anywhere -> empty wide frame
    assert 'joyLikelihood' not in df.columns
    assert len(df) == 0
def test_google_vision_face_batch():
    """Exercise batched extraction over a mixed image set and two videos."""
    image_paths = [join(get_test_data_path(), 'image', name + '.jpg')
                   for name in ['apple', 'obama', 'thai_people']]
    batch = [ImageStim(p) for p in image_paths]
    extractor = GoogleVisionAPIFaceExtractor(batch_size=5)
    merged = merge_results(extractor.transform(batch), format='wide',
                           extractor_names=False,
                           handle_annotations='first')
    # One of the three images (the apple) yields no face row
    assert merged.shape == (2, 139)
    assert 'joyLikelihood' in merged.columns
    assert merged['joyLikelihood'][0] == 'VERY_LIKELY'
    assert merged['joyLikelihood'][1] == 'VERY_LIKELY'

    conv = FrameSamplingFilter(every=10)
    sampled = conv.transform(VideoStim(join(VIDEO_DIR, 'obama_speech.mp4')))
    merged = merge_results(extractor.transform(sampled), format='wide',
                           extractor_names=False)
    assert 'joyLikelihood' in merged.columns
    assert merged.shape == (22, 139)

    sampled = conv.transform(VideoStim(join(VIDEO_DIR, 'small.mp4')))
    merged = merge_results(extractor.transform(sampled), format='wide',
                           extractor_names=False)
    # Face-free video: merge produces an empty frame without face columns
    assert 'joyLikelihood' not in merged.columns
    assert len(merged) == 0
def test_google_vision_api_face_extractor():
    """A single clear portrait should yield a confident, joyful face."""
    extractor = GoogleVisionAPIFaceExtractor(num_retries=5)
    image = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    df = extractor.transform(image).to_df()
    assert 'joyLikelihood' in df.columns
    assert df['joyLikelihood'][0] == 'VERY_LIKELY'
    assert df['face_detectionConfidence'][0] > 0.7
def test_google_vision_multiple_face_extraction():
    """'first' annotation handling must yield fewer rows than keeping all."""
    stim = ImageStim(join(get_test_data_path(), 'image', 'thai_people.jpg'))
    # Only first record
    first_only = GoogleVisionAPIFaceExtractor().transform(stim) \
        .to_df(handle_annotations='first')
    assert 'joyLikelihood' in first_only.columns
    # All records
    all_faces = GoogleVisionAPIFaceExtractor().transform(stim).to_df()
    assert 'joyLikelihood' in all_faces.columns
    # Group photo: keeping every annotation produces more rows
    assert all_faces.shape[0] > first_only.shape[0]
def test_google_vision_api_face_extractor():
    """Key validation plus a single-image extraction sanity check."""
    extractor = GoogleVisionAPIFaceExtractor(num_retries=5)
    assert extractor.validate_keys()
    image = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    df = extractor.transform(image).to_df()
    assert 'joyLikelihood' in df.columns
    assert df['joyLikelihood'][0] == 'VERY_LIKELY'
    assert float(df['face_detectionConfidence'][0]) > 0.7

    # A bogus discovery file should fail key validation
    bad_extractor = GoogleVisionAPIFaceExtractor(discovery_file='nogood')
    assert not bad_extractor.validate_keys()
def test_google_vision_api_face_extractor_inits():
    """Check constructor defaults and _parse_annotations on a canned payload."""
    ext = GoogleVisionAPIFaceExtractor(num_retries=5)
    assert ext.num_retries == 5
    assert ext.max_results == 100
    assert ext.service is not None

    # Test parsing of individual response
    payload_path = join(get_test_data_path(), 'payloads',
                        'google_vision_api_face_payload.json')
    with open(payload_path, 'r') as fh:
        response = json.load(fh)
    features, data = ext._parse_annotations(response['faceAnnotations'])
    assert len(features) == len(data)

    def value_of(feature_name):
        # Look up a parsed value by its (first) feature name
        return data[features.index(feature_name)]

    assert value_of('angerLikelihood') == 'VERY_UNLIKELY'
    assert value_of('landmark_LEFT_EYE_BOTTOM_BOUNDARY_y') == 257.023
    assert np.isnan(value_of('boundingPoly_vertex2_y'))
# Example #8
def test_convert_to_long_graph():
    """Running a single-extractor Graph then converting to long format
    should expose feature, extractor, and history columns."""
    stim = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    graph = Graph([GoogleVisionAPIFaceExtractor()])
    wide = graph.run(stim)
    long_df = to_long_format(wide)
    for column in ('feature', 'extractor', 'history'):
        assert column in long_df.columns
# Example #9
def extract_faces(video, output_path='events/visual_face_events.csv'):
    """Detect faces in a video and write a BIDS-style events CSV.

    Samples the video at 1 Hz, runs the Google Vision face extractor,
    keeps only the face-detection-confidence feature, and writes the
    rounded values as a 'modulation' column.

    Parameters
    ----------
    video : VideoStim
        The video to process.
    output_path : str, optional
        Destination CSV path. Defaults to 'events/visual_face_events.csv'
        (the previously hard-coded location); parent directories are
        created if missing.

    Returns
    -------
    pandas.DataFrame
        The events frame that was written to disk.
    """
    import os

    frame_sampling_filter = FrameSamplingFilter(hertz=1)
    sampled_video = frame_sampling_filter.transform(video)

    ext = GoogleVisionAPIFaceExtractor()
    results = ext.transform(sampled_video)
    res = merge_results(results,
                        metadata=False,
                        format='long',
                        extractor_names=False,
                        object_id=False)

    # Keep only the detection-confidence rows; frames with no face get 0
    res = res[res['feature'] == 'face_detectionConfidence']
    res = res.drop(['order'], axis=1)
    res = res.fillna(0)
    res['value'] = np.round(res['value'])
    res.rename(columns={
        'value': 'modulation',
        'feature': 'trial_type'
    },
               inplace=True)

    # The original wrote to a fixed path and crashed when the 'events'
    # directory did not exist; ensure the parent directory is present.
    parent = os.path.dirname(output_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    res.to_csv(output_path)
    return res
# Example #10
def test_google_vision_api_face_extractor_inits():
    """Verify constructor defaults and DataFrame conversion of a canned
    face-annotation payload wrapped in an ExtractorResult."""
    ext = GoogleVisionAPIFaceExtractor(num_retries=5)
    assert ext.num_retries == 5
    assert ext.max_results == 100
    assert ext.service is not None

    # Parse a stored API response rather than hitting the network
    payload_path = join(
        get_test_data_path(), 'payloads', 'google_vision_api_face_payload.json')
    with open(payload_path, 'r') as fh:
        response = json.load(fh)
    stim = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
    df = ExtractorResult(response['faceAnnotations'], stim, ext).to_df()
    assert df['angerLikelihood'][0] == 'VERY_UNLIKELY'
    assert df['landmark_LEFT_EYE_BOTTOM_BOUNDARY_y'][0] == 257.023
    assert np.isnan(df['boundingPoly_vertex2_y'][0])
# Example #11
def test_google_vision_multiple_face_extraction():
    """Compare 'first'-annotation output against the full per-face table."""
    img_path = join(get_test_data_path(), 'image', 'thai_people.jpg')
    stim = ImageStim(img_path)
    # Only first record
    ext_first = GoogleVisionAPIFaceExtractor()
    df_first = ext_first.transform(stim).to_df(handle_annotations='first')
    assert 'joyLikelihood' in df_first.columns
    # All records
    ext_all = GoogleVisionAPIFaceExtractor()
    df_all = ext_all.transform(stim).to_df()
    assert 'joyLikelihood' in df_all.columns
    assert df_all.shape[0] > df_first.shape[0]
# Example #12
# obama_trump.py
# inspired by https://github.com/tyarkoni/pliers
# see also http://tyarkoni.github.io/pliers/

from pliers.extractors import GoogleVisionAPIFaceExtractor

obama_img = 'img/obama.jpg'
trump_img = 'img/trump.jpg'

ext = GoogleVisionAPIFaceExtractor()

# Run face detection on each portrait and tabulate the results
results = {name: ext.transform(path).to_df()
           for name, path in (('obama', obama_img), ('trump', trump_img))}

# Print anger then joy likelihood for each person, Obama first
for person in ('obama', 'trump'):
    for emotion in ('angerLikelihood', 'joyLikelihood'):
        print(results[person][emotion])
# Example #13
from pliers.extractors import GoogleVisionAPIFaceExtractor

ext = GoogleVisionAPIFaceExtractor()

# Run face detection on a local test image and show the tabular result
result = ext.transform(
    '/home/icarus/projects/pliers/pliers/tests/data/image/obama.jpg').to_df()

# `print result` is Python 2 statement syntax (a SyntaxError on Python 3);
# use the function form.
print(result)