import requests
import io
import sys
import os
import uuid
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

key = '346d220ffb134f7799e990e819b43951'
endPoint = 'https://cristianpalta.cognitiveservices.azure.com/'

face_client = FaceClient(endPoint, CognitiveServicesCredentials(key))

print("Estoy conectado")

simple_face_image_url = 'https://twitter.com/CristianPalta1/photo.'
simple_image_name = os.path.basename(simple_face_image_url)
detected_faces = face_client.face.detect_with_url(url=simple_face_image_url)

if not detected_faces:
    raise Exception('No faces found :(')
print('Found faces in', simple_image_name, '; first face ID:', detected_faces[0].face_id)

# for face in similar_face:
#     first_image_face_ID = face.face_id
#python faceId.py
Example #2
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials


def get_face_client():
    """Create an authenticated FaceClient."""
    SUBSCRIPTION_KEY = '9d6cb49831114629b16ca379307fc583'
    ENDPOINT = 'https://cristian.cognitiveservices.azure.com/'
    credential = CognitiveServicesCredentials(SUBSCRIPTION_KEY)
    return FaceClient(ENDPOINT, credential)
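A minimal usage sketch (the image URL below is a placeholder, not part of the original snippet):

face_client = get_face_client()
detected = face_client.face.detect_with_url(url='https://example.com/face.jpg')  # placeholder URL
for face in detected:
    print(face.face_id)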
Example #3
import cv2
import os
import io
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

face_key = '300a5f6336924cfd964eae736e2792ac'
face_endpoint = 'https://faceopencv.cognitiveservices.azure.com/'
credentials = CognitiveServicesCredentials(face_key)
client = FaceClient(face_endpoint, credentials)

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
                                     "haarcascade_frontalface_default.xml")

print(cv2.data.haarcascades)
video_capture = cv2.VideoCapture(0)
csfinished = False
emotions = []
framecount = 0
while True:
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = face_cascade.detectMultiScale(gray, 1.1, 1, False, (200, 200))
    if len(faces) == 0:  # detectMultiScale returns an empty sequence, not None
        csfinished = False

    # Draw a rectangle around the face
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
Example #4
TARGET_ENDPOINT = os.environ["FACE_ENDPOINT2"]
# Target subscription key. Must match the target endpoint region/subscription.
TARGET_KEY = os.environ['FACE_SUBSCRIPTION_KEY2']
# Target subscription ID. It will be the same as the source ID if you created the Face resources in the same subscription (but are moving from region to region). If they are different subscriptions, add the other target ID here.
TARGET_ID = os.environ['AZURE_SUBSCRIPTION_ID']
# NOTE: We do not need to specify the target PersonGroup ID here because we generate it with this example.
# Each new location you transfer a person group to will have a generated, new person group ID for that region.
# </snippet_snapshotvars>

'''
Authenticate
All examples use the same client, except for Snapshot Operations.
'''
# <snippet_auth>
# Create an authenticated FaceClient.
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
# </snippet_auth>
'''
END - Authenticate
'''

'''
Detect faces in two images
'''
print('-----------------------------')
print() 
print('DETECT FACES')
print() 
# <snippet_detect>
# Detect a face in an image that contains a single face
single_face_image_url = 'https://www.biography.com/.image/t_share/MTQ1MzAyNzYzOTgxNTE0NTEz/john-f-kennedy---mini-biography.jpg'
cosmos_collection_link = 'dbs/workshop/colls/faces'

client = cosmos_client.CosmosClient(url_connection=cosmos_url,
                                    auth={'masterKey': cosmos_primary_key})


@app.route('/')
def home():
    docs = list(client.ReadItems(cosmos_collection_link))
    return render_template('home.html', result=docs)


face_api_endpoint = '<Your Face Api Endpoint>'
face_api_key = '<Your Face Api Key>'
credentials = CognitiveServicesCredentials(face_api_key)
face_client = FaceClient(face_api_endpoint, credentials=credentials)


def best_emotion(emotion):
    emotions = {}
    emotions['anger'] = emotion.anger
    emotions['contempt'] = emotion.contempt
    emotions['disgust'] = emotion.disgust
    emotions['fear'] = emotion.fear
    emotions['happiness'] = emotion.happiness
    emotions['neutral'] = emotion.neutral
    emotions['sadness'] = emotion.sadness
    emotions['surprise'] = emotion.surprise
    return max(zip(emotions.values(), emotions.keys()))[1]
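The max(zip(emotions.values(), emotions.keys()))[1] idiom above returns the name of the highest-scoring emotion. An equivalent, arguably clearer form (shown here as a hypothetical best_emotion_alt; same result up to tie-breaking):

def best_emotion_alt(emotion_scores):
    # emotion_scores maps emotion name -> score; pick the key with the highest score.
    return max(emotion_scores, key=emotion_scores.get)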

Example #6
def verify_in_large_person_group(subscription_key):
    """VerifyInLargePersonGroup.

    This will verify whether faces detected as similar in a large group are of the same person.
    """

    face_base_url = "https://{}.api.cognitive.microsoft.com".format(
        FACE_LOCATION)
    face_client = FaceClient(
        endpoint=face_base_url,
        credentials=CognitiveServicesCredentials(subscription_key))

    image_url_prefix = "https://csdx.blob.core.windows.net/resources/Face/Images/"
    target_image_file_names = ["Family1-Dad1.jpg", "Family1-Dad2.jpg"]
    source_image_file_name1 = "Family1-Dad3.jpg"

    # Create a large person group.
    large_person_group_id = str(uuid.uuid4())
    print("Create a large person group {}.".format(large_person_group_id))
    face_client.large_person_group.create(
        large_person_group_id=large_person_group_id,
        name=large_person_group_id)

    # Create a large person group person.
    person_id = face_client.large_person_group_person.create(
        large_person_group_id=large_person_group_id, name="Dad").person_id

    # Local wrapper object for the created person.
    p = Person(name="Dad", user_data="Person for sample", person_id=person_id)
    print("Create a large person group person {}.".format(p.name))

    for target_image_file_name in target_image_file_names:
        # Add face to the large person group.
        print("Add face to the large person group person {} from image {}.".
              format(p.name, target_image_file_name))
        faces = face_client.large_person_group_person.add_face_from_url(
            large_person_group_id=large_person_group_id,
            person_id=p.person_id,
            url=image_url_prefix + target_image_file_name,
            user_data=target_image_file_name)

        if not faces:
            raise Exception("No persisted face from image {}.".format(
                target_image_file_name))

    # Verification example for faces of the same person.
    verify_result = face_client.face.verify_face_to_person(
        face_id=_detect_faces_helper(face_client=face_client,
                                     image_url=image_url_prefix +
                                     source_image_file_name1)[0].face_id,
        person_id=p.person_id,
        large_person_group_id=large_person_group_id)
    if verify_result.is_identical:
        print(
            "Faces from {} & {} are of the same (Positive) person, similarity confidence: {}."
            .format(source_image_file_name1, p.name, verify_result.confidence))
    else:
        print(
            "Faces from {} & {} are of different (Negative) persons, similarity confidence: {}."
            .format(source_image_file_name1, p.name, verify_result.confidence))

    # Delete the person group.
    print("Delete the large person group {}.\n".format(large_person_group_id))
    face_client.large_person_group.delete(
        large_person_group_id=large_person_group_id)
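Several samples in this listing call _detect_faces_helper, which is never shown. A plausible minimal version, inferred from how it is called (an assumption, not the original helper):

def _detect_faces_helper(face_client, image_url):
    # Detect faces at the given URL and fail loudly when none are found,
    # mirroring how the samples consume the returned DetectedFace list.
    detected_faces = face_client.face.detect_with_url(url=image_url)
    if not detected_faces:
        raise Exception("No face detected from image {}.".format(image_url))
    return detected_faces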
Example #7
FACE_END = os.getenv('FACE_END')
FACE_KEY = os.getenv('FACE_KEY')  # assumed: read like the other settings; needed by FaceClient below
LINE_SECRET = os.getenv('LINE_SECRET')
LINE_TOKEN = os.getenv('LINE_TOKEN')
IMGUR_CONFIG = {
    "client_id": os.getenv('IMGUR_ID'),
    "client_secret": os.getenv('IMGUR_SECRET'),
    "access_token": os.getenv('IMGUR_ACCESS'),
    "refresh_token": os.getenv('IMGUR_REFRESH')
}

CV_CLIENT = ComputerVisionClient(
    ENDPOINT, CognitiveServicesCredentials(SUBSCRIPTION_KEY))
LINE_BOT = LineBotApi(LINE_TOKEN)
HANDLER = WebhookHandler(LINE_SECRET)
IMGUR_CLIENT = Imgur(config=IMGUR_CONFIG)
FACE_CLIENT = FaceClient(FACE_END, CognitiveServicesCredentials(FACE_KEY))
PERSON_GROUP_ID = "liu_ffaaccee"


@app.route("/")
def hello():
    "hello world"
    return "Hello World!!!!!"


def azure_describe(url):
    """
    Output azure image description result
    """
    description_results = CV_CLIENT.describe_image(url)
    output = ""
Example #8
    def test_snapshot(self):
        credentials = CognitiveServicesCredentials(
            self.settings.CS_SUBSCRIPTION_KEY)
        face_client = FaceClient("https://westus2.api.cognitive.microsoft.com",
                                 credentials=credentials)

        # Create a PersonGroup.
        personGroupId = "69ff3e98-2de7-468e-beae-f78aa85200db"
        newPersonGroupId = "fb644ecf-3ed0-4b25-9270-1d174b980afb"

        face_client.person_group.create(personGroupId, "test", "test")

        # Take a snapshot for the PersonGroup
        apply_scope = ["Apply-Scope-Subscriptions"]
        snapshot_type = "PersonGroup"

        takeSnapshotResponse = face_client.snapshot.take(snapshot_type,
                                                         personGroupId,
                                                         apply_scope,
                                                         raw=True)
        takeOperationId = takeSnapshotResponse.headers[
            "Operation-Location"].split("/")[2]

        getOperationStatusResponse = face_client.snapshot.get_operation_status(
            takeOperationId)
        operationStatus = getOperationStatusResponse.additional_properties[
            "Status"]

        # Wait for take operation to complete.
        while operationStatus != "succeeded" and operationStatus != "failed":
            getOperationStatusResponse = face_client.snapshot.get_operation_status(
                takeOperationId)
            operationStatus = getOperationStatusResponse.additional_properties[
                "Status"]
            if self.is_live:
                sleep(1)

        self.assertEqual(operationStatus, "succeeded")

        snapshotId = getOperationStatusResponse.additional_properties[
            "ResourceLocation"].split("/")[2]

        # Apply the snapshot to a new PersonGroup.
        applySnapshotResponse = face_client.snapshot.apply(snapshotId,
                                                           newPersonGroupId,
                                                           raw=True)
        applyOperationId = applySnapshotResponse.headers[
            "Operation-Location"].split("/")[2]

        applyOperationStatusResponse = face_client.snapshot.get_operation_status(
            applyOperationId)
        operationStatus = applyOperationStatusResponse.additional_properties[
            "Status"]

        # Wait for apply operation to complete.
        while operationStatus != "succeeded" and operationStatus != "failed":
            applyOperationStatusResponse = face_client.snapshot.get_operation_status(
                applyOperationId)
            operationStatus = applyOperationStatusResponse.additional_properties[
                "Status"]
            if self.is_live:
                sleep(1)

        self.assertEqual(operationStatus, "succeeded")

        face_client.snapshot.delete(snapshotId)
        face_client.person_group.delete(personGroupId)
        face_client.person_group.delete(newPersonGroupId)
Example #9
import os

from flask import Flask, request, abort
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face import FaceClient
from linebot import (LineBotApi, WebhookHandler)
from linebot.exceptions import (InvalidSignatureError)
from linebot.models import (MessageEvent, TextMessage, TextSendMessage,
                            ImageMessage)

app = Flask(__name__)

YOUR_CHANNEL_ACCESS_TOKEN = os.getenv('YOUR_CHANNEL_ACCESS_TOKEN')
YOUR_CHANNEL_SECRET = os.getenv('YOUR_CHANNEL_SECRET')
YOUR_FACE_API_KEY = os.environ["YOUR_FACE_API_KEY"]
YOUR_FACE_API_ENDPOINT = os.environ["YOUR_FACE_API_ENDPOINT"]
PERSON_GROUP_ID = os.getenv('PERSON_GROUP_ID')
PERSON_ID_AUDREY = os.getenv('PERSON_ID_AUDREY')

face_client = FaceClient(YOUR_FACE_API_ENDPOINT,
                         CognitiveServicesCredentials(YOUR_FACE_API_KEY))
line_bot_api = LineBotApi(YOUR_CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(YOUR_CHANNEL_SECRET)


@app.route("/callback", methods=['POST'])
def callback():
    # get X-Line-Signature header value
    signature = request.headers['X-Line-Signature']

    # get request body as text
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)

    # handle webhook body
    try:
        # Completed with the standard linebot webhook pattern; the original
        # snippet was truncated here.
        handler.handle(body, signature)
    except InvalidSignatureError:
        abort(400)

    return 'OK'
Example #10
import os, io, base64, random, time, requests
from flask import Flask, render_template, request, jsonify, make_response
from azure.cognitiveservices.vision.face import FaceClient, models
from msrest.authentication import CognitiveServicesCredentials

credentials = CognitiveServicesCredentials(os.environ['FACE_SUBSCRIPTION_KEY'])
face_client = FaceClient(os.environ['FACE_ENDPOINT'], credentials=credentials)

app = Flask(__name__)


# The root route, returns the home.html page
@app.route('/')
def home():
    # Add any required page data here
    page_data = {}
    return render_template('home.html', page_data=page_data)


def is_happy(emotion):
    emotions = {}
    emotions['anger'] = emotion.anger
    emotions['contempt'] = emotion.contempt
    emotions['disgust'] = emotion.disgust
    emotions['fear'] = emotion.fear
    emotions['happiness'] = emotion.happiness
    emotions['neutral'] = emotion.neutral
    emotions['sadness'] = emotion.sadness
    emotions['surprise'] = emotion.surprise
    best_emotion = max(zip(emotions.values(), emotions.keys()))[1]
    return best_emotion == 'happiness'
Example #11
import cv2
import pickle
import os
import io
import serial
import time
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
#from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
arduino = serial.Serial('COM4', 9600, timeout=.1)

facekey = "yourfacekey"
endpoint = "https://xxxxxx.api.cognitive.microsoft.com/"
facecred = CognitiveServicesCredentials(facekey)
client = FaceClient(endpoint,facecred)

cvkey = "yourvisionkey"
cvcecred = CognitiveServicesCredentials(cvkey)
cvclient = ComputerVisionClient(endpoint, cvcecred)

#cuvkey = "youtcustomvisionkey"
#cuvcecred = CognitiveServicesCredentials(cuvkey)
#cuvclient = CustomVisionPredictionClient(cuvkey,endpoint)

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainner.yml")

Happinessprobability = ""
playhappy = False
Example #12
'''
Prerequisites:
    Python 3+
    Install Face SDK: pip install azure-cognitiveservices-vision-face
'''
# Group image for testing against
group_photo = 'test-image.jpg'
# To point at a subdirectory, add path components, e.g.:
# os.path.join(os.path.dirname(os.path.realpath(__file__)), "images-directory")
IMAGES_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)))
''' 
Authentication
'''
# Replace with a valid subscription key (keeping the quotes in place).
KEY = '<ADD SUBSCRIPTION KEY HERE>'
# Replace westus if it's not your region
BASE_URL = 'https://westus.api.cognitive.microsoft.com'
face_client = FaceClient(BASE_URL, CognitiveServicesCredentials(KEY))
''' 
Create the PersonGroup
'''
# Create empty person group
# person_group_id = str(uuid.uuid4()) # Uncomment to generate a random ID
person_group_id = 'my-unique-person-group'
print(person_group_id)
face_client.person_group.create(person_group_id=person_group_id,
                                name=person_group_id)

# Define woman friend
woman = face_client.person_group_person.create(person_group_id, "Woman")
# Define man friend
man = face_client.person_group_person.create(person_group_id, "Man")
# Define child friend
Example #13
def findFace(url):
    # Set the FACE_SUBSCRIPTION_KEY environment variable with your key as the value.
    # This key will serve all examples in this document.
    KEY = '597b454dab8e43fda9474e5ca49f963a'
    #os.getenv('COGNITIVE_SERVICE_KEY')

    # Set the FACE_ENDPOINT environment variable with the endpoint from your Face service in Azure.
    # This endpoint will be used in all examples in this quickstart.
    ENDPOINT = 'https://ai-project-4.cognitiveservices.azure.com/'
    #os.getenv('FACE_ENDPOINT')

    # Create an authenticated FaceClient.
    face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
    # Detect a face in an image that contains a single face
    single_face_image_url = url
    single_image_name = os.path.basename(single_face_image_url)
    detected_faces = face_client.face.detect_with_url(
        url=single_face_image_url)
    if not detected_faces:
        print('\nThere are no detected faces in the image ', single_image_name,
              "\n")
        return False
    #raise Exception('No face detected from image {}'.format(single_image_name))

    # Display the detected face ID in the first single-face image.
    # Face IDs are used for comparison to faces (their IDs) detected in other images.
    print('Detected face ID from', single_image_name, ':')
    for face in detected_faces:
        print(face.face_id)
    print()

    # Save this ID for use in Find Similar
    first_image_face_ID = detected_faces[0].face_id

    # Detect the faces in an image that contains multiple faces
    # Each detected face gets assigned a new ID
    multi_face_image_url = url
    multi_image_name = os.path.basename(multi_face_image_url)
    detected_faces2 = face_client.face.detect_with_url(
        url=multi_face_image_url)
    # Search through faces detected in group image for the single face from first image.
    # First, create a list of the face IDs found in the second image.
    second_image_face_IDs = list(map(lambda x: x.face_id, detected_faces2))
    # Next, find similar face IDs like the one detected in the first image.
    similar_faces = face_client.face.find_similar(
        face_id=first_image_face_ID, face_ids=second_image_face_IDs)
    if not similar_faces:
        print('No similar faces found in', multi_image_name, '.')
        return False
    # Print the details of the similar faces detected
    print('Similar faces found in', multi_image_name + ':')
    for face in similar_faces:
        first_image_face_ID = face.face_id
        # The similar face IDs of the single face image and the group image do not need to match,
        # they are only used for identification purposes in each image.
        # The similar faces are matched using the Cognitive Services algorithm in find_similar().
        face_info = next((x for x in detected_faces2
                          if x.face_id == first_image_face_ID), None)
        if face_info:
            print('  Face ID: ', first_image_face_ID)
            print('  Face rectangle:')
            print('    Left: ', str(face_info.face_rectangle.left))
            print('    Top: ', str(face_info.face_rectangle.top))
            print('    Width: ', str(face_info.face_rectangle.width))
            print('    Height: ', str(face_info.face_rectangle.height))

        return True

    return False
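A hedged usage sketch for findFace (the URL is a placeholder assumption):

if findFace('https://example.com/group-photo.jpg'):  # placeholder URL
    print('The detected face also appears in the group image.')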
Example #14
import io
import glob
import os
import sys
import time
import uuid
import requests
from urllib.parse import urlparse
from io import BytesIO
from PIL import Image, ImageDraw
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, SnapshotObjectType, OperationStatusType

# Create an authenticated FaceClient.
face_client = FaceClient('Endpoint', CognitiveServicesCredentials('KEY'))

# Detect a face in an image that contains a single face
single_face_image_url = 'Your_image_URL'
single_image_name = os.path.basename(single_face_image_url)
detected_faces = face_client.face.detect_with_url(url=single_face_image_url)
if not detected_faces:
    raise Exception('No face detected from image {}'.format(single_image_name))

# Display the detected face ID in the first single-face image.
# Face IDs are used for comparison to faces (their IDs) detected in other images.
print('Detected face ID from', single_image_name, ':')
for face in detected_faces:
    print(face.face_id)
print()
Example #15
def identify_in_large_person_group(subscription_key):
    """IdentifyInLargePersonGroup.

    This will identify faces in a large person group.
    """

    face_base_url = "https://{}.api.cognitive.microsoft.com".format(
        FACE_LOCATION)
    face_client = FaceClient(
        endpoint=face_base_url,
        credentials=CognitiveServicesCredentials(subscription_key))
    image_url_prefix = "https://csdx.blob.core.windows.net/resources/Face/Images/"
    target_image_file_dictionary = {
        "Family1-Dad": ["Family1-Dad1.jpg", "Family1-Dad2.jpg"],
        "Family1-Mom": ["Family1-Mom1.jpg", "Family1-Mom2.jpg"],
        "Family1-Son": ["Family1-Son1.jpg", "Family1-Son2.jpg"],
        "Family1-Daughter": ["Family1-Daughter1.jpg", "Family1-Daughter2.jpg"],
        "Family2-Lady": ["Family2-Lady1.jpg", "Family2-Lady2.jpg"],
        "Family2-Man": ["Family2-Man1.jpg", "Family2-Man2.jpg"]
    }
    source_image_file_name = "identification1.jpg"

    # Create a large person group.
    large_person_group_id = str(uuid.uuid4())
    print("Create a large person group {}.".format(large_person_group_id))
    face_client.large_person_group.create(
        large_person_group_id=large_person_group_id,
        name=large_person_group_id)

    for target_image_file_dictionary_name in target_image_file_dictionary.keys(
    ):
        person_id = face_client.large_person_group_person.create(
            large_person_group_id=large_person_group_id,
            name=target_image_file_dictionary_name).person_id

        # Create a person group person.
        person = Person(name=target_image_file_dictionary_name,
                        user_data="Person for sample",
                        person_id=person_id)

        print("Create a large person group person {}.".format(person.name))

        for target_image_file_name in target_image_file_dictionary[
                target_image_file_dictionary_name]:
            # Add face to the person group person
            print("Add face to the large person group person {} from image.".
                  format(target_image_file_dictionary_name,
                         target_image_file_name))
            face = face_client.large_person_group_person.add_face_from_url(
                large_person_group_id=large_person_group_id,
                person_id=person.person_id,
                url=image_url_prefix + target_image_file_name,
                user_data=target_image_file_name)
            if not face:
                raise Exception("No persisted face from image {}".format(
                    target_image_file_name))

    # Start to train the large person group.
    print("Train large person group {}.".format(large_person_group_id))
    face_client.large_person_group.train(
        large_person_group_id=large_person_group_id)
    training_status = face_client.large_person_group.get_training_status(
        large_person_group_id=large_person_group_id)
    print("Training status is {}".format(training_status.status))
    if training_status.status == TrainingStatusType.failed:
        raise Exception("Training failed with message {}.".format(
            training_status.message))

    # Detect faces from source image url and add detected face ids to source_face_ids
    source_face_ids = [
        detected_face.face_id
        for detected_face in _detect_faces_helper(face_client=face_client,
                                                  image_url=image_url_prefix +
                                                  source_image_file_name)
    ]

    # Identify example of identifying faces towards large person group.
    identify_results = face_client.face.identify(
        face_ids=source_face_ids, large_person_group_id=large_person_group_id)
    if not identify_results:
        print(
            "No person identified in the large person group for faces from the {}."
            .format(source_image_file_name))
        return

    for identify_result in identify_results:
        if not identify_result.candidates:
            continue
        person = face_client.large_person_group_person.get(
            large_person_group_id=large_person_group_id,
            person_id=identify_result.candidates[0].person_id)
        print("Person {} is identified for face: {} - {}, confidence: {}.".
              format(person.name, source_image_file_name,
                     identify_result.face_id,
                     identify_result.candidates[0].confidence))

    # Delete the person group.
    face_client.large_person_group.delete(
        large_person_group_id=large_person_group_id)
    print("Delete the large person group {}.\n".format(large_person_group_id))
Example #16
import os
from PIL import Image, ImageDraw
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, SnapshotObjectType, OperationStatusType

'''
# Set the FACE_SUBSCRIPTION_KEY environment variable with your key as the value.
# This key will serve all examples in this document.
KEY = os.environ['FACE_SUBSCRIPTION_KEY']

# Set the FACE_ENDPOINT environment variable with the endpoint from your Face service in Azure.
# This endpoint will be used in all examples in this quickstart.
ENDPOINT = os.environ['FACE_ENDPOINT']
'''

face_client = FaceClient('https://westeurope.api.cognitive.microsoft.com/', CognitiveServicesCredentials('c8c8240e711641f5b157b8eb37d6c908'))

# Detect a face in an image that contains a single face
single_face_image_url = 'https://www.biography.com/.image/t_share/MTQ1MzAyNzYzOTgxNTE0NTEz/john-f-kennedy---mini-biography.jpg'
single_image_name = os.path.basename(single_face_image_url)
detected_faces = face_client.face.detect_with_url(url=single_face_image_url)
if not detected_faces:
    raise Exception('No face detected from image {}'.format(single_image_name))

# Display the detected face ID in the first single-face image.
# Face IDs are used for comparison to faces (their IDs) detected in other images.
print('Detected face ID from', single_image_name, ':')
for face in detected_faces:
    print(dir(face))    
print()
Example #17
def verify_face_to_face(subscription_key):
    """VerifyFaceToFace.

    This will verify whether faces detected as similar are the same person.
    """

    face_base_url = "https://{}.api.cognitive.microsoft.com".format(
        FACE_LOCATION)
    face_client = FaceClient(
        endpoint=face_base_url,
        credentials=CognitiveServicesCredentials(subscription_key))

    image_url_prefix = "https://csdx.blob.core.windows.net/resources/Face/Images/"
    target_image_file_names = ["Family1-Dad1.jpg", "Family1-Dad2.jpg"]
    source_image_file_name1 = "Family1-Dad3.jpg"
    source_image_file_name2 = "Family1-Son1.jpg"

    # Detect faces from target image url and add their face ids to target_face_ids
    target_face_ids = [
        _detect_faces_helper(face_client=face_client,
                             image_url=image_url_prefix +
                             image_file_name)[0].face_id
        for image_file_name in target_image_file_names
    ]

    # Detect faces from source image file 1.
    detected_faces1 = _detect_faces_helper(face_client=face_client,
                                           image_url=image_url_prefix +
                                           source_image_file_name1)
    source_face_id1 = detected_faces1[0].face_id

    # Detect faces from source image file 2.
    detected_faces2 = _detect_faces_helper(face_client=face_client,
                                           image_url=image_url_prefix +
                                           source_image_file_name2)
    source_face_id2 = detected_faces2[0].face_id

    # Verification example for faces of the same person.
    verify_result1 = face_client.face.verify_face_to_face(
        face_id1=source_face_id1, face_id2=target_face_ids[0])
    if verify_result1.is_identical:
        print(
            "Faces from {} & {} are of the same (Positive) person, similarity confidence: {}."
            .format(source_image_file_name1, target_image_file_names[0],
                    verify_result1.confidence))
    else:
        print(
            "Faces from {} & {} are of different (Negative) persons, similarity confidence: {}."
            .format(source_image_file_name1, target_image_file_names[0],
                    verify_result1.confidence))

    # Verification example for faces of different persons.
    verify_result2 = face_client.face.verify_face_to_face(
        face_id1=source_face_id2, face_id2=target_face_ids[0])
    if verify_result2.is_identical:
        print(
            "Faces from {} & {} are of the same (Negative) person, similarity confidence: {}.\n"
            .format(source_image_file_name2, target_image_file_names[0],
                    verify_result2.confidence))
    else:
        print(
            "Faces from {} & {} are of different (Positive) persons, similarity confidence: {}.\n"
            .format(source_image_file_name2, target_image_file_names[0],
                    verify_result2.confidence))
Example #18
    def __init__(self):
        self.client = FaceClient(
            FACE_API_ENDPOINT,
            CognitiveServicesCredentials(FACE_API_KEY),
        )
        self.formator = get_formator(Source.FACE_API)()
Example #19
def image_verify(cID):
    allpic_df = pd.read_csv('IdentityPics - Copy.csv')
    imagePath = 'identityPics-custID_PicID/'

    # This key will serve all examples in this document.
    KEY = "28e599a416f44e37af84ebf4967b2702"

    # This endpoint will be used in all examples in this quickstart.
    ENDPOINT = "https://face20.cognitiveservices.azure.com/"

    face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
    PERSON_GROUP_ID = "123"  # the Face API expects a string group ID; "123" matches the lookup below

    test_Df = cID
    #test_Df[['custID', 'ID_jpg']] = test_Df.Cust_bankAcctID.str.split('_', expand=True)
    acct_series = pd.Series(test_Df["Cust_bankAcctID"].values)
    filepath = 'Images/'

    faceIds = []
    identified_faces = []
    num_left = 0
    count = 0

    # Output df
    output_df = acct_series.to_frame()  # dataframe only has Cust_bankAcctID

    output_df = output_df.rename(
        columns={list(output_df)[0]: "Cust_bankAcctID"})
    print('output_df', output_df)
    output_df.insert(1, "face_id", 0)
    output_df.insert(2, "confidence", 0.3)
    output_df.insert(3, "identified_id", 0)
    output_df.insert(4, "custID", 0)

    acc_series_len = len(acct_series)
    if acc_series_len < 10:
        num_left = acc_series_len
    else:
        num_left = 10

    for acct in acct_series:

        #print('acct', type(acct))
        # The source photos contain this person

        source_image_file_name1 = test_Df[test_Df['Cust_bankAcctID'] ==
                                          acct].iloc[0]['Cust_bankAcctID']
        #print('source_image_file_name1----->' , source_image_file_name1)
        testimg = filepath + source_image_file_name1

        sourceimage = cv2.imread(testimg)
        ret, buf = cv2.imencode('.jpg', sourceimage)

        # stream-ify the buffer
        stream = io.BytesIO(buf)

        detected_faces1 = face_client.face.detect_with_stream(
            stream, detection_model="detection_01")  ## TestImage
        print(detected_faces1[0].face_id, '**********')

        faceIds.append(
            detected_faces1[0].face_id)  #  faceIds = [] getting populated

        # TestImg face_id in outputDf
        output_df.loc[output_df.Cust_bankAcctID == acct,
                      "face_id"] = detected_faces1[0].face_id
        output_df.loc[output_df.Cust_bankAcctID == acct,
                      "custID"] = acct.split('_')[0]
        count = count + 1

        #print('output_df  229' , output_df)
        # BatchPrinting of face_Ids of testImage
        #print('num_left', num_left , 'count-----' ,count)
        #print(count,'count')

        if (count == num_left):

            # print('****faceIds***',faceIds)
            identified_faces.extend(
                face_client.face.identify(face_ids=faceIds,
                                          person_group_id=PERSON_GROUP_ID,
                                          max_num_of_candidates_returned=1,
                                          confidence_threshold=0.45))
            faceIds = []
            num_left = acc_series_len - num_left
            #print(num_left, 'numleft')
            count = 0
            #print(count,'count2')
        time.sleep(4)
    #print('num_left2', num_left, 'count-----2', count)

    # identified_faces = face_client.face.identify(face_ids= faceIds, person_group_id=PERSON_GROUP_ID, max_num_of_candidates_returned=1, confidence_threshold=0.56)

    for face in identified_faces:
        conf = 0.2
        faceid = face.face_id  # face is the current element of identified_faces

        idf_id = 'A'

        ids = output_df.loc[output_df.face_id == faceid]
        # print("ids is: ", ids)

        bnk_id = ids["custID"].iloc[0]
        # print("bnk_id is: ", ids)
        if len(face.candidates) > 0:
            conf = face.candidates[0].confidence
            output_df.loc[output_df.face_id == faceid, "confidence"] = conf
            # print(face.candidates[0].person_id)
            person_id = face_client.person_group_person.get(
                person_group_id="123", person_id=face.candidates[0].person_id)

            output_df.loc[output_df.face_id == faceid,
                          "identified_id"] = person_id.name + '.jpg'
            idf_id = person_id.name  #+ '.jpg'

            time.sleep(3)
        match = 0
        conf1 = conf * 100
        # print("conf bnk_id idf_id")
        # print(conf1,"  ", bnk_id, "  ", idf_id)
        #print('output_df new coulmns', output_df)

        print(output_df)
        if conf1 > 56 and bnk_id == idf_id:
            match = 1
        else:
            match = 0
        output_df.loc[output_df.face_id == faceid, "verifiedID"] = match

    print('**************output_df******', output_df)
    output_df.to_csv('conf.csv')

    image_d = output_df[output_df.verifiedID == 1]

    print('**************image_d******', image_d)
    image_df = image_d.drop(
        columns=['face_id', 'custID', 'confidence', 'identified_id'])
    #print('image_df',image_df)
    return image_df
Example #20
'''
    pip install --upgrade azure-cognitiveservices-vision-face
'''

# Images for detection/comparison
single_face_image_url = 'https://www.biography.com/.image/t_share/MTQ1MzAyNzYzOTgxNTE0NTEz/john-f-kennedy---mini-biography.jpg'
multi_face_image_url = "http://www.historyplace.com/kennedy/president-family-portrait-closeup.jpg"

# Add your Cognitive Services subscription key and endpoint to your environment variables.
subscription_key = os.environ['COGNITIVE_SERVICES_SUBSCRIPTION_KEY']
endpoint = os.environ['COGNITIVE_SERVICES_ENDPOINT']

'''
AUTHENTICATE
Create a Face client
'''
face_client = FaceClient(endpoint, CognitiveServicesCredentials(subscription_key))

'''
FACE 
Detect faces in 2 images, then find a similar face between them.
'''
# Detect a face in an image that contains a single face
single_image_name = os.path.basename(single_face_image_url)
detected_faces = face_client.face.detect_with_url(url=single_face_image_url)
if not detected_faces:
    raise Exception('No face detected from image {}'.format(single_image_name))

# Display the detected face ID in the first single-faced image.
# Face IDs are used for comparison to faces (their IDs) detected in other images.
print('Detected face ID from', single_image_name, ':')
for x in detected_faces:
Example #21
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person

# NOTE: Replace this with a valid Face subscription key.
SUBSCRIPTION_KEY = "INSERT KEY HERE"

# You must use the same region as you used to get your subscription
# keys. For example, if you got your subscription keys from westus,
# replace "westcentralus" with "westus".
#
# Free trial subscription keys are generated in the westcentralus
# region. If you use a free trial subscription key, you shouldn't
# need to change the region.
FACE_LOCATION = "westcentralus"

face_base_url = "https://{}.api.cognitive.microsoft.com".format(FACE_LOCATION)
face_client = FaceClient(face_base_url,
                         CognitiveServicesCredentials(SUBSCRIPTION_KEY))

# This image should contain a single face.
remote_image_URL_1 = "https://www.biography.com/.image/t_share/MTQ1MzAyNzYzOTgxNTE0NTEz/john-f-kennedy---mini-biography.jpg"

# This image should contain several faces, at least one of which is similar to the face in remote_image_URL_1.
remote_image_URL_2 = "https://www.biography.com/.image/t_share/MTQ1NDY3OTIxMzExNzM3NjE3/john-f-kennedy---debating-richard-nixon.jpg"


# Detect faces in a remote image.
def detect_faces(face_client, image_url):
    print("Detecting faces...")
    detected_faces = face_client.face.detect_with_url(url=image_url)
    if not detected_faces:
        raise Exception('No face detected from image {}'.format(image_url))
    if not detected_faces[0]:
Example #22
import configparser
import glob
import os

from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials


class FaceVerification:

    config = configparser.ConfigParser()
    config.read('settings.cfg')

    KEY = config['FACE_IDENTIFICATION']['FACE_IDENTIFICATION_KEY']
    ENDPOINT = config['FACE_IDENTIFICATION']['FACE_IDENTIFICATION_ENDPOINT']
    face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
    isSetUp = False
    test_flag = False

    def __init__(self, target_person_name, test_flag):
        if len(target_person_name) == 0:
            raise ValueError(
                "Cannot supply person name of length 0 to the FaceVerification module"
            )

        self.person_name = target_person_name
        self.target_face_id = self.get_target_face_id()
        self.test_flag = test_flag

        # print("Initialized the FaceVerification module with name", self.person_name,
        #       ", and face_id:", self.target_face_id)

    def set_target_face_id(self, face_id):
        self.target_face_id = face_id

    def get_target_face_id(self):
        img_fns = self.__get_images_of_target_person()
        if img_fns and len(self.__get_face_ids(img_fns)) != 0:
            return self.__get_face_ids(img_fns)[
                0]  # only taking a single face as the target for now
        return None

    def __get_images_of_target_person(self):
        # path = self.person_name
        path = os.path.relpath('user_verification/' + self.person_name)
        img_files = glob.glob(path + "/*.png")
        if len(img_files) == 0:
            # raise Exception("You must first supply some example pictures of the patient in this session")
            if self.test_flag:
                print('No images of target person were found...')
            self.isSetUp = False
            return None
        else:
            return img_files

    def __get_face_ids(self, img_fns):
        face_ids = []
        for img_fn in img_fns:
            detected_faces = self.face_client.face.detect_with_stream(
                image=open(img_fn, 'rb'),
                return_face_id=True,
                recognition_model='recognition_02')
            if len(detected_faces) != 1:
                # print(img_fn, " is rejected, because the number of detected face is not 1")
                continue
            else:
                face_ids.append(detected_faces[0].face_id)

        return face_ids

    def find_verified_face(self, detected_faces):
        """
        Attempts to find and return a face that would belong to the person specified upon initialization of the module.
        If not found, returns None.
        :param detected_faces: a face bunch from azure, where we don't know who they belong to
        :return: A tuple of a face that belongs to the target person, and the confidence. (None, -1) if no face matches.
        """
        for face in detected_faces:
            current_face_id = face.face_id
            target_face_id = self.target_face_id
            # print('cur face: ', current_face_id)
            # print('target face: ', target_face_id)
            verify_result = self.face_client.face.verify_face_to_face(
                current_face_id, target_face_id)

            if verify_result.is_identical:
                if self.test_flag:
                    print("Found the person we're looking for")
                return face, verify_result.confidence
            else:
                if self.test_flag:
                    print("Found a different person, skipping")
                continue

        return None, -1

    def getStatus(self):
        return self.isSetUp

    def setStatus(self, status):
        self.isSetUp = status
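A hedged usage sketch (assumes a settings.cfg with the keys above, a user_verification/<name>/ folder of PNGs, and a placeholder frame file):

verifier = FaceVerification('alice', True)  # 'alice' is a placeholder person name
if verifier.target_face_id:
    detected = verifier.face_client.face.detect_with_stream(
        image=open('frame.png', 'rb'),  # placeholder capture
        recognition_model='recognition_02')  # must match the model used in get_target_face_id()
    match, confidence = verifier.find_verified_face(detected)
    if match is not None:
        print('Verified target person with confidence', confidence)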
Example #23
    def __init__(self, faceapi_endpoint, faceapi_subkey, storage_account_name,
                 storage_account_key):
        self._face_client = FaceClient(
            faceapi_endpoint, CognitiveServicesCredentials(faceapi_subkey))
        self._blob_client = AzureStorageBlockBlob(storage_account_name,
                                                  storage_account_key)
Example #24
def face_detection(subscription_key):
    """FaceDetection.

    This will print out all of the facial attributes for a list of images.
    """
    def get_accessories(accessories):
        """Helper function for face_detection sample.

        This will return a string representation of a person's accessories.
        """

        accessory_str = ",".join([str(accessory) for accessory in accessories])
        return accessory_str if accessory_str else "No accessories"

    def get_emotion(emotion):
        """Helper function for face_detection sample.

        This will determine and return the emotion a person is showing.
        """

        max_emotion_value = 0.0
        emotion_type = None

        for emotion_name, emotion_value in vars(emotion).items():
            if emotion_name == "additional_properties":
                continue
            if emotion_value > max_emotion_value:
                max_emotion_value = emotion_value
                emotion_type = emotion_name
        return emotion_type

    def get_hair(hair):
        """Helper function for face_detection sample.

         This determines and returns the hair color detected for a face in an image.
        """

        if not hair.hair_color:
            return "invisible" if hair.invisible else "bald"
        return_color = HairColorType.unknown
        max_confidence = 0.0

        for hair_color in hair.hair_color:
            if hair_color.confidence > max_confidence:
                max_confidence = hair_color.confidence
                return_color = hair_color.color

        return return_color

    face_base_url = "https://{}.api.cognitive.microsoft.com".format(
        FACE_LOCATION)
    face_client = FaceClient(
        endpoint=face_base_url,
        credentials=CognitiveServicesCredentials(subscription_key))
    image_url_prefix = "https://csdx.blob.core.windows.net/resources/Face/Images/"
    image_file_names = [
        "detection1.jpg", "detection2.jpg", "detection3.jpg", "detection4.jpg",
        "detection5.jpg", "detection6.jpg"
    ]
    for image_file_name in image_file_names:
        detected_faces = face_client.face.detect_with_url(
            url=image_url_prefix + image_file_name,
            return_face_attributes=[
                FaceAttributeType.accessories, 'age', 'blur', 'emotion',
                'exposure', 'facialHair', 'gender', 'glasses', 'hair',
                'headPose', 'makeup', 'noise', 'occlusion', 'smile'
            ])
        if not detected_faces:
            raise Exception(
                "No face detected from image {}".format(image_file_name))
        print("{} faces detected from image {}".format(len(detected_faces),
                                                       image_file_name))
        if not detected_faces[0].face_attributes:
            raise Exception(
                "Parameter return_face_attributes of detect_with_stream_async must be set to get face attributes."
            )

        for face in detected_faces:
            print(
                "Face attributes of {}   Rectangle(Left/Top/Width/Height) : {} {} {} {}"
                .format(image_file_name, face.face_rectangle.left,
                        face.face_rectangle.top, face.face_rectangle.width,
                        face.face_rectangle.height))
            print("Face attributes of {}   Accessories : {}".format(
                image_file_name,
                get_accessories(face.face_attributes.accessories)))
            print("Face attributes of {}   Age : {}".format(
                image_file_name, face.face_attributes.age))
            print("Face attributes of {}   Blur : {}".format(
                image_file_name, face.face_attributes.blur.blur_level))
            print("Face attributes of {}   Emotion : {}".format(
                image_file_name, get_emotion(face.face_attributes.emotion)))
            print("Face attributes of {}   Exposure : {}".format(
                image_file_name, face.face_attributes.exposure.exposure_level))
            if face.face_attributes.facial_hair.moustache + face.face_attributes.facial_hair.beard + face.face_attributes.facial_hair.sideburns > 0:
                print("Face attributes of {}   FacialHair : Yes".format(
                    image_file_name))
            else:
                print("Face attributes of {}   FacialHair : No".format(
                    image_file_name))
            print("Face attributes of {}   Gender : {}".format(
                image_file_name, face.face_attributes.gender))
            print("Face attributes of {}   Glasses : {}".format(
                image_file_name, face.face_attributes.glasses))
            print("Face attributes of {}   Hair : {}".format(
                image_file_name, get_hair(face.face_attributes.hair)))
            print(
                "Face attributes of {}   HeadPose : Pitch: {}, Roll: {}, Yaw: {}"
                .format(image_file_name,
                        round(face.face_attributes.head_pose.pitch, 2),
                        round(face.face_attributes.head_pose.roll, 2),
                        round(face.face_attributes.head_pose.yaw, 2)))
            if face.face_attributes.makeup.eye_makeup or face.face_attributes.makeup.lip_makeup:
                print("Face attributes of {}   Makeup : Yes".format(
                    image_file_name))
            else:
                print("Face attributes of {}   Makeup : No".format(
                    image_file_name))
            print("Face attributes of {}   Noise : {}".format(
                image_file_name, face.face_attributes.noise.noise_level))
            print(
                "Face attributes of {}   Occlusion : EyeOccluded: {},   ForeheadOccluded: {},   MouthOccluded: {}"
                .format(
                    image_file_name, "Yes"
                    if face.face_attributes.occlusion.eye_occluded else "No",
                    "Yes" if face.face_attributes.occlusion.forehead_occluded
                    else "No", "Yes" if
                    face.face_attributes.occlusion.mouth_occluded else "No"))

            print("Face attributes of {}   Smile : {}".format(
                image_file_name, face.face_attributes.smile))
Example #25
def run_model():
    imageData = request.form["image"]
    screenWidthMm = request.form["screenWidthMm"]
    screenHeightMm = request.form["screenHeightMm"]

    # save image into temp file to pass stream to face api
    tempImagePath = os.path.join(SUBJECT_DATA_PATH + "/frames", IMAGE_FILE)
    urllib.request.urlretrieve(imageData, tempImagePath)
    image = open(tempImagePath, 'rb')

    # call Azure Face API
    face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
    detected_faces = face_client.face.detect_with_stream(
        image,
        return_face_id=False,
        return_face_landmarks=True,
        return_face_attributes=None,
        recognition_model='recognition_01',
        return_recognition_model=False,
        detection_model='detection_01')

    if not detected_faces:
        raise Exception('No face detected from image')

    face = detected_faces[0]

    # Save face dimensions in face rectangle and face landmarks
    appleFace = {'H': [], 'W': [], 'X': [], 'Y': [], 'IsValid': []}
    appleLeftEye = {'H': [], 'W': [], 'X': [], 'Y': [], 'IsValid': []}
    appleRightEye = {'H': [], 'W': [], 'X': [], 'Y': [], 'IsValid': []}

    addFaceValues(appleFace, face, True, False)
    addFaceValues(appleLeftEye, face, False, True)
    addFaceValues(appleRightEye, face, False, False)

    # Save into json files for prepareDataset to crop image
    facePath = os.path.join(SUBJECT_DATA_PATH, 'appleFace.json')
    with open(facePath, 'w') as outfile:
        json.dump(appleFace, outfile)

    leftEyePath = os.path.join(SUBJECT_DATA_PATH, 'appleLeftEye.json')
    with open(leftEyePath, 'w') as outfile:
        json.dump(appleLeftEye, outfile)

    rightEyePath = os.path.join(SUBJECT_DATA_PATH, 'appleRightEye.json')
    with open(rightEyePath, 'w') as outfile:
        json.dump(appleRightEye, outfile)

    # run facegrid matlab script to get facegrid json
    frameW, frameH = Image.open(tempImagePath).size
    gridW = 25
    gridH = 25
    labelFaceGrid = faceGridFromFaceRect(frameW, frameH, gridW, gridH,
                                         appleFace['X'], appleFace['Y'],
                                         appleFace['W'], appleFace['H'])

    faceGrid = {
        'H': [labelFaceGrid[0][3]],
        'W': [labelFaceGrid[0][2]],
        'X': [labelFaceGrid[0][0]],
        'Y': [labelFaceGrid[0][1]],
        'IsValid': [1]
    }
    faceGridPath = os.path.join(SUBJECT_DATA_PATH, 'faceGrid.json')
    with open(faceGridPath, 'w') as outfile:
        json.dump(faceGrid, outfile)

    # generate frames.json
    frames = [IMAGE_FILE]
    framesPath = os.path.join(SUBJECT_DATA_PATH, 'frames.json')
    with open(framesPath, 'w') as outfile:
        json.dump(frames, outfile)

    # There's a bug where numpy can turn a 1x1 array into a single number, so we copy this subject and pretend there are two.
    if os.path.exists(DATASET_PATH + "/00001"):
        rmtree(DATASET_PATH + "/00001")
    copytree(DATASET_PATH + "/00000", DATASET_PATH + "/00001")

    # run prepareDataset to generate cropped images
    prepareInput(DATASET_PATH, DATASET_OUTPUT_PATH)

    # run model with new cropped images
    tensor = runModel(DATASET_OUTPUT_PATH)[0]

    # coordinates are distance from camera in cm
    cam_x_cm, cam_y_cm = (tensor[0].item(), tensor[1].item())

    # convert the camera coordinates to screen coordinates in cm
    #
    # orientation:
    #   1 = portrait
    #   2 = portrait upside down
    #   3 = landscape w/ home button on the right
    #   4 = landscape w/ home button on the left
    orientation = 1
    screen_x_cm, screen_y_cm = cam2screen(cam_x_cm, cam_y_cm, orientation,
                                          screenWidthMm, screenHeightMm)

    # delete temp file
    os.remove(tempImagePath)

    # for now, send back the (x, y) they're looking on the screen in cm
    return str(screen_x_cm) + " " + str(screen_y_cm)
Example #26
def image_analysis_in_stream(subscription_key):
    """ImageAnalysisInStream.

    This will analyze an image from a stream and return all available features.
    """

    face_base_url = "https://{}.api.cognitive.microsoft.com".format(
        FACE_LOCATION)
    face_client = FaceClient(face_base_url,
                             CognitiveServicesCredentials(subscription_key))

    faces = [
        jpgfile for jpgfile in os.listdir(IMAGES_FOLDER)
        if jpgfile.startswith("Family1")
    ]
    faces_ids = []

    for face in faces:
        with open(os.path.join(IMAGES_FOLDER, face), "rb") as face_fd:
            # result type: azure.cognitiveservices.vision.face.models.DetectedFace
            result = face_client.face.detect_with_stream(
                face_fd,
                # You can use enum from FaceAttributeType, or direct string
                return_face_attributes=[
                    FaceAttributeType.age,  # Could have been the string 'age'
                    'gender',
                    'headPose',
                    'smile',
                    'facialHair',
                    'glasses',
                    'emotion',
                    'hair',
                    'makeup',
                    'occlusion',
                    'accessories',
                    'blur',
                    'exposure',
                    'noise'
                ])

        if not result:
            print("Unable to detect any face in {}".format(face))
            continue

        detected_face = result[0]
        faces_ids.append(detected_face.face_id)

        print("\nImage {}".format(face))
        print("Detected age: {}".format(detected_face.face_attributes.age))
        print("Detected gender: {}".format(
            detected_face.face_attributes.gender))
        print("Detected emotion: {}".format(
            detected_face.face_attributes.emotion.happiness))
    print("\n")

    # Verification example for faces of the same person.
    verify_result = face_client.face.verify_face_to_face(
        faces_ids[0],
        faces_ids[1],
    )
    if verify_result.is_identical:
        print(
            "Faces from {} & {} are of the same (Positive) person, similarity confidence: {}."
            .format(faces[0], faces[1], verify_result.confidence))
    else:
        print(
            "Faces from {} & {} are of different (Negative) persons, similarity confidence: {}."
            .format(faces[0], faces[1], verify_result.confidence))

    # Verification example for faces of different persons.
    verify_result = face_client.face.verify_face_to_face(
        faces_ids[1],
        faces_ids[2],
    )
    if verify_result.is_identical:
        print(
            "Faces from {} & {} are of the same (Positive) person, similarity confidence: {}."
            .format(faces[1], faces[2], verify_result.confidence))
    else:
        print(
            "Faces from {} & {} are of different (Negative) persons, similarity confidence: {}."
            .format(faces[1], faces[2], verify_result.confidence))
Example #27
COGSVCS_KEY = os.environ["COGSVCS_KEY"]
COGSVCS_REGION = 'northcentralus'
# Assumed: build the client URL from the region above (not in the original snippet).
COGSVCS_CLIENTURL = 'https://{}.api.cognitive.microsoft.com'.format(COGSVCS_REGION)

# Create vision_client
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import ComputerVisionErrorException

vision_credentials = CognitiveServicesCredentials(COGSVCS_KEY)
vision_client = ComputerVisionClient(COGSVCS_CLIENTURL, vision_credentials)

# Create face_client
from azure.cognitiveservices.vision.face import FaceClient

face_credentials = CognitiveServicesCredentials(COGSVCS_KEY)
face_client = FaceClient(COGSVCS_CLIENTURL, face_credentials)

person_group_id = 'reactor'

# Create the application
app = Flask(__name__)


@app.route("/", methods=["GET"])
def index():
    return render_template("index.html")


@app.route("/translate", methods=["GET", "POST"])
def translate():
    # Load image or placeholder
Example #28
def find_similar_in_large_face_list(subscription_key):
    """FindSimilarInLargeFaceList.

    This will detect similar faces from a list of images against a single image by placing the list of images in a large face list.
    """

    face_base_url = "https://{}.api.cognitive.microsoft.com".format(
        FACE_LOCATION)
    face_client = FaceClient(
        endpoint=face_base_url,
        credentials=CognitiveServicesCredentials(subscription_key))
    image_url_prefix = "https://csdx.blob.core.windows.net/resources/Face/Images/"
    target_image_file_names = [
        "Family1-Dad1.jpg", "Family1-Daughter1.jpg", "Family1-Mom1.jpg",
        "Family1-Son1.jpg", "Family2-Lady1.jpg", "Family2-Man1.jpg",
        "Family3-Lady1.jpg", "Family3-Man1.jpg"
    ]

    source_image_file_name = "findsimilar.jpg"

    # Create a large face list.
    large_face_list_id = str(uuid.uuid4())
    print("Create large face list {}.".format(large_face_list_id))
    face_client.large_face_list.create(
        large_face_list_id=large_face_list_id,
        name="large face list for find_similar_in_large_face_list sample",
        user_data="large face list for find_similar_in_large_face_list sample")

    for target_image_file_name in target_image_file_names:
        faces = face_client.large_face_list.add_face_from_url(
            large_face_list_id=large_face_list_id,
            url=image_url_prefix + target_image_file_name,
            user_data=target_image_file_name)
        if not faces:
            raise Exception("No face detected from image {}.".format(
                target_image_file_name))
        print(
            "Face from image {} is successfully added to the large face list.".
            format(target_image_file_name))

    # Start to train the large face list.
    print("Train large face list {}".format(large_face_list_id))
    face_client.large_face_list.train(large_face_list_id=large_face_list_id)

    training_status = face_client.large_face_list.get_training_status(
        large_face_list_id=large_face_list_id)
    if training_status.status == TrainingStatusType.failed:
        raise Exception("Training failed with message {}.".format(
            training_status.message))

    # Get persisted faces from the large face list.
    persisted_faces = face_client.large_face_list.list_faces(
        large_face_list_id)
    if not persisted_faces:
        raise Exception("No persisted face in large face list {}.".format(
            large_face_list_id))

    # Detect faces from source image url.
    detected_faces = _detect_faces_helper(face_client=face_client,
                                          image_url=image_url_prefix +
                                          source_image_file_name)

    # Find similar example of face id to large face list.
    similar_results = face_client.face.find_similar(
        face_id=detected_faces[0].face_id,
        large_face_list_id=large_face_list_id)

    for similar_result in similar_results:
        persisted_faces = [
            pf for pf in persisted_faces
            if pf.persisted_face_id == similar_result.persisted_face_id
        ]
        if not persisted_faces:
            print("persisted face not found in similar result.")
            continue
        persisted_face = persisted_faces[0]
        print("Faces from {} & {} are similar with confidence: {}.".format(
            source_image_file_name, persisted_face.user_data,
            similar_result.confidence))

    # Delete the large face list.
    face_client.large_face_list.delete(large_face_list_id=large_face_list_id)
    print("Delete large face list {}.\n".format(large_face_list_id))
Example #29
import os
from PIL import Image, ImageDraw
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

# API key and Resource endpoint for Azure Cognitive Services : Vision (Face) API
# Setup a free account with Azure and create resource
KEY = os.getenv('FACE_RESOURCE_KEY')
ENDPOINT = os.getenv('FACE_RESOURCE_ENDPOINT')

# Authenticate the client
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))

# Detect faces
# path to image with single face
single_face_image_path = 'images/jfk/jfk.jpg'
single_image_name = os.path.basename(single_face_image_path)

# Open Image
image = open(single_face_image_path, 'r+b')

# Azure Cognitive Service Face API Function
detected_faces = face_client.face.detect_with_stream(image=image, detection_model='detection_03')

# Always close opened files
image.close()

# Check if the returned response has detected faces in it
# Raise exception if there is no detected face
if not detected_faces:
    raise Exception('No face detected from image {}'.format(single_image_name))
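The PIL imports above go unused in this snippet; a minimal sketch (an assumption, not from the original) that puts them to work drawing the detected rectangles:

# Draw each detected face rectangle onto a copy of the input image.
img = Image.open(single_face_image_path)
draw = ImageDraw.Draw(img)
for face in detected_faces:
    r = face.face_rectangle
    draw.rectangle(((r.left, r.top), (r.left + r.width, r.top + r.height)),
                   outline='red', width=3)
img.save('detected_' + single_image_name)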
Example #30
import random, os, io, base64
from flask import Flask, render_template, request, jsonify
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

credentials = CognitiveServicesCredentials(os.environ['face_api_key'])
face_client = FaceClient(os.environ['face_api_endpoint'],
                         credentials=credentials)

emotions = [
    'anger', 'neutral', 'disgust', 'fear', 'happiness', 'sadness', 'surprise'
]


# emotion is the JSON parameter that provides the percentage breakdown of each
# emotion displayed by the user's face.
# emotions is a dictionary that stores the scores for seven of these emotions
# (contempt is commented out below).
# best_emotion() returns the name of the emotion with the highest score.
def best_emotion(emotion):
    emotions = {}
    emotions['anger'] = emotion.anger
    #emotions['contempt'] = emotion.contempt
    emotions['disgust'] = emotion.disgust
    emotions['fear'] = emotion.fear
    emotions['happiness'] = emotion.happiness
    emotions['neutral'] = emotion.neutral
    emotions['sadness'] = emotion.sadness
    emotions['surprise'] = emotion.surprise
    return max(zip(emotions.values(), emotions.keys()))[1]
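A hedged sketch of how best_emotion() would typically be fed (the URL is a placeholder; the attribute plumbing follows the detect calls used elsewhere in this listing):

image_url = 'https://example.com/face.jpg'  # placeholder URL
faces = face_client.face.detect_with_url(url=image_url,
                                         return_face_attributes=['emotion'])
if faces:
    print(best_emotion(faces[0].face_attributes.emotion))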