Example #1
0
# NOTE(review): this fragment relies on `os`, `FaceClient`, and
# `CognitiveServicesCredentials` being imported elsewhere — not visible here.
save_path = 'image/dst/test.jpg'  # where the output image is written

# Used when running after training()
test_image = 'image/'  # path to the input image(s)
select_name = 'AragakiYui'  # person selected when displaying results ['AragakiYui', 'HoshinoGen', 'ManoErina']

# Set the FACE_SUBSCRIPTION_KEY environment variable with your key as the value.
# This key will serve all examples in this document.
KEY = os.environ['FACE_SUBSCRIPTION_KEY']

# Set the FACE_ENDPOINT environment variable with the endpoint from your Face service in Azure.
# This endpoint will be used in all examples in this quickstart.
ENDPOINT = os.environ['FACE_ENDPOINT']

# Create an authenticated FaceClient.
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))

# Detect a face in an image that contains a single face


# Detect faces within an image.
def face_detected(face_data, image_name):
    """Detect faces in *face_data* (an image stream) via the Azure Face API.

    The original note says this returns (detected_faces, image_face_ID).
    NOTE(review): this definition is truncated in this snippet — it ends at
    the dangling ``if not detected_faces:`` below.
    """
    # We use detection model 2 because we are not retrieving attributes.
    # NOTE(review): the Python SDK keyword is `detection_model`;
    # `detectionModel` likely raises TypeError — confirm against the
    # azure-cognitiveservices-vision-face reference.
    detected_faces = face_client.face.detect_with_stream(
        image=face_data,
        recognition_model="recognition_03",
        detectionModel='detection_02')
    if not detected_faces:
import io
import glob
import os
import sys
import time
import uuid
import requests
from urllib.parse import urlparse
from io import BytesIO
from PIL import Image, ImageDraw
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, SnapshotObjectType, OperationStatusType

# Create an authenticated FaceClient.
# NOTE(review): 'Endpoint' and 'KEY' are placeholders — substitute real
# values before running.
face_client = FaceClient('Endpoint', CognitiveServicesCredentials('KEY'))

# Detect a face in an image that contains a single face
single_face_image_url = 'Your_image_URL'
single_image_name = os.path.basename(single_face_image_url)
detected_faces = face_client.face.detect_with_url(url=single_face_image_url)
# Abort early when the service finds no face at all.
if not detected_faces:
    raise Exception('No face detected from image {}'.format(single_image_name))

# Display the detected face ID in the first single-face image.
# Face IDs are used for comparison to faces (their IDs) detected in other images.
print('Detected face ID from', single_image_name, ':')
for face in detected_faces: 
    print (face.face_id)
print()
def findFace(url):
    """Detect faces at *url* and report faces similar to the first one found.

    Runs two detection passes over the same image, then asks the Face API
    which faces from the second pass are similar to the first face from the
    first pass, printing the rectangle of each match.

    Returns:
        True when at least one similar face was found (details printed),
        False when no face or no similar face was detected.
    """
    # SECURITY(review): credentials are hard-coded; prefer the
    # environment-variable lookups (commented out below) outside of a demo.
    KEY = '597b454dab8e43fda9474e5ca49f963a'
    #os.getenv('COGNITIVE_SERVICE_KEY')

    ENDPOINT = 'https://ai-project-4.cognitiveservices.azure.com/'
    #os.getenv('FACE_ENDPOINT')

    # Create an authenticated FaceClient.
    face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))

    # Detect a face in an image that contains a single face.
    single_face_image_url = url
    single_image_name = os.path.basename(single_face_image_url)
    detected_faces = face_client.face.detect_with_url(
        url=single_face_image_url)
    if not detected_faces:
        print('\nThere are no detected faces in the image ', single_image_name,
              "\n")
        return False

    # Display the detected face ID in the first single-face image.
    # Face IDs are used for comparison to faces (their IDs) detected in other images.
    print('Detected face ID from', single_image_name, ':')
    for face in detected_faces:
        print(face.face_id)
    print()

    # Save this ID for use in Find Similar.
    first_image_face_ID = detected_faces[0].face_id

    # Detect the faces in an image that contains multiple faces.
    # Each detected face gets assigned a new ID.
    multi_face_image_url = url
    multi_image_name = os.path.basename(multi_face_image_url)
    detected_faces2 = face_client.face.detect_with_url(
        url=multi_face_image_url)

    # First, create a list of the face IDs found in the second pass.
    second_image_face_IDs = list(map(lambda x: x.face_id, detected_faces2))
    # Next, find similar face IDs like the one detected in the first image.
    similar_faces = face_client.face.find_similar(
        face_id=first_image_face_ID, face_ids=second_image_face_IDs)
    # BUGFIX: the original tested `similar_faces[0]`, which raises IndexError
    # when the service returns an empty list; test the list itself.
    if not similar_faces:
        print('No similar faces found in', multi_image_name, '.')
        return False

    # Print the details of every similar face detected.
    # BUGFIX: the original returned from inside this loop, so only the
    # first similar face was ever reported.
    print('Similar faces found in', multi_image_name + ':')
    for face in similar_faces:
        similar_id = face.face_id
        # The similar face IDs of the single face image and the group image
        # do not need to match; they are only used for identification in
        # each image.  Matching is done by the service in find_similar().
        face_info = next(x for x in detected_faces2
                         if x.face_id == similar_id)
        if face_info:
            print('  Face ID: ', similar_id)
            print('  Face rectangle:')
            print('    Left: ', str(face_info.face_rectangle.left))
            print('    Top: ', str(face_info.face_rectangle.top))
            print('    Width: ', str(face_info.face_rectangle.width))
            print('    Height: ', str(face_info.face_rectangle.height))

    return True
import requests
import io
import sys
import os
import uuid
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

# SECURITY(review): hard-coded subscription key — move to configuration.
key = '346d220ffb134f7799e990e819b43951'
endPoint = 'https://cristianpalta.cognitiveservices.azure.com/'

# Authenticated Face client for this snippet.
face_client = FaceClient(endPoint, CognitiveServicesCredentials(key))

print("Estoy conectado")

# NOTE(review): this URL does not look like a direct image file — confirm
# the intended image URL before running.
simple_face_image_url = 'https://twitter.com/CristianPalta1/photo.'
simple_image_name = os.path.basename(simple_face_image_url)
detected_faces = face_client.face.detect_with_url(url=simple_face_image_url)

if not detected_faces:
    raise Exception('No enconre caras :(')
print('Encontre caras y su Id es: ', simple_image_name)

# for face in similar_face:
#     first_image_face_ID = face.face_id
# Usage: python faceId.py
Example #5
0
# NOTE(review): this fragment references `os`, `KEY`, `ENDPOINT`,
# `FaceClient`, and `CognitiveServicesCredentials` defined elsewhere, and
# is truncated at the end — the detect call that uses
# `single_face_image_url` is not visible here.
TARGET_ENDPOINT = os.environ["FACE_ENDPOINT2"]
# Target subscription key. Must match the target endpoint region/subscription.
TARGET_KEY = os.environ['FACE_SUBSCRIPTION_KEY2']
# Target subscription ID. It will be the same as the source ID if created Face resources from the
# same subscription (but moving from region to region). If they are different subscriptions, add the other target ID here.
TARGET_ID = os.environ['AZURE_SUBSCRIPTION_ID']
# NOTE: We do not need to specify the target PersonGroup ID here because we generate it with this example.
# Each new location you transfer a person group to will have a generated, new person group ID for that region.
# </snippet_snapshotvars>
'''
Authenticate
All examples use the same client, except for Snapshot Operations.
'''
# <snippet_auth>
# Create an authenticated FaceClient.
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
# </snippet_auth>
'''
END - Authenticate
'''
'''
Detect faces in two images
'''
print('-----------------------------')
print()
print('DETECT FACES')
print()
# <snippet_detect>
# Detect a face in an image that contains a single face
single_face_image_url = 'https://www.biography.com/.image/t_share/MTQ1MzAyNzYzOTgxNTE0NTEz/john-f-kennedy---mini-biography.jpg'
single_image_name = os.path.basename(single_face_image_url)
Example #6
0
def get_face_client(endpoint, key):
    """Build an authenticated FaceClient for the given endpoint/key pair."""
    credentials = CognitiveServicesCredentials(key)
    return FaceClient(endpoint, credentials)
Example #7
0
import random, os, io, base64
from flask import Flask, render_template, request, jsonify
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

# Authenticate the Face client from environment variables
# (raises KeyError when face_api_key / face_api_endpoint are unset).
credentials = CognitiveServicesCredentials(os.environ['face_api_key'])
face_client = FaceClient(os.environ['face_api_endpoint'],
                         credentials=credentials)

# Emotion names this app works with ('contempt' is not included).
emotions = [
    'anger', 'neutral', 'disgust', 'fear', 'happiness', 'sadness', 'surprise'
]


def best_emotion(emotion):
    """Return the name of the strongest emotion on a face-attributes result.

    *emotion* is the per-face emotion object returned by the Face API; each
    attribute holds that emotion's confidence score.  Seven emotions are
    considered — 'contempt' is deliberately excluded, matching the original
    behavior (the original header comment claimed 8, but only 7 were stored).

    Ties break toward the alphabetically last name, exactly as with the
    original (score, name) tuple comparison.
    """
    # Local dict renamed from `emotions` to avoid shadowing the
    # module-level `emotions` list defined above.
    scores = {
        name: getattr(emotion, name)
        for name in ('anger', 'disgust', 'fear', 'happiness',
                     'neutral', 'sadness', 'surprise')
    }
    return max(zip(scores.values(), scores.keys()))[1]

Example #8
0
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face import FaceClient
from azure.cognitiveservices.search.imagesearch import ImageSearchAPI

from .utils import settings

# Credentials pulled from the project's settings helper (see .utils).
FACE_KEY = settings("azure", "face", "key")
FACE_ENDPOINT = settings("azure", "face", "endpoint")

BING_KEY = settings("azure", "bing", "key")
BING_ENDPOINT = settings("azure", "bing", "endpoint")

# Module-level service clients shared by the rest of the module.
face_client = FaceClient(FACE_ENDPOINT, CognitiveServicesCredentials(FACE_KEY))
bing_client = ImageSearchAPI(CognitiveServicesCredentials(BING_KEY),
                             base_url=BING_ENDPOINT)
    # NOTE(review): the indented lines below appear to come from inside a
    # config/helper function whose header is not visible in this fragment.
    FACE_END = os.getenv('FACE_END')  # Azure Face endpoint
    LINE_SECRET = os.getenv('LINE_SECRET')  # LINE channel secret
    LINE_TOKEN = os.getenv('LINE_TOKEN')  # LINE channel access token
    IMGUR_CONFIG = {
        "client_id": os.getenv('IMGUR_ID'),
        "client_secret": os.getenv('IMGUR_SECRET'),
        "access_token": os.getenv('IMGUR_ACCESS'),
        "refresh_token": os.getenv('IMGUR_REFRESH')
    }

# Module-level service clients.  NOTE(review): ENDPOINT, SUBSCRIPTION_KEY,
# FACE_KEY and the client classes used here are defined elsewhere — not
# visible in this fragment.
CV_CLIENT = ComputerVisionClient(
    ENDPOINT, CognitiveServicesCredentials(SUBSCRIPTION_KEY))
LINE_BOT = LineBotApi(LINE_TOKEN)
HANDLER = WebhookHandler(LINE_SECRET)
IMGUR_CLIENT = Imgur(config=IMGUR_CONFIG)
FACE_CLIENT = FaceClient(FACE_END, CognitiveServicesCredentials(FACE_KEY))
PERSON_GROUP_ID = "forinragen02"  # Face API person-group id used by this bot




def azure_describe(url):
    """Return Azure's image-description captions for *url* as one string.

    Each caption is formatted on its own line with its confidence
    percentage.
    """
    captions = CV_CLIENT.describe_image(url).captions
    lines = [
        "'{}' with confidence {:.2f}% \n".format(cap.text,
                                                 cap.confidence * 100)
        for cap in captions
    ]
    return "".join(lines)
# Cognitive Services credentials (raises KeyError when COGSVCS_KEY unset).
COGSVCS_KEY = os.environ["COGSVCS_KEY"]
COGSVCS_REGION = 'northcentralus'

# Create vision_client
# NOTE(review): COGSVCS_CLIENTURL and Flask are not defined/imported in
# this fragment — they must come from elsewhere in the original file.
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import ComputerVisionErrorException

vision_credentials = CognitiveServicesCredentials(COGSVCS_KEY)
vision_client = ComputerVisionClient(COGSVCS_CLIENTURL, vision_credentials)

# Create face_client
from azure.cognitiveservices.vision.face import FaceClient

face_credentials = CognitiveServicesCredentials(COGSVCS_KEY)
face_client = FaceClient(COGSVCS_CLIENTURL, face_credentials)

# Face API person-group id used by this app.
person_group_id = "reactor"

# Create the application
app = Flask(__name__)


@app.route("/", methods=["GET"])
def index():
    """Serve the landing page."""
    template = "index.html"
    return render_template(template)


@app.route("/translate", methods=["GET", "POST"])
def translate():
    """Handle GET/POST for the /translate route.

    NOTE(review): the body of this view is truncated in this snippet —
    only the leading comment is visible here.
    """
    # Load image or placeholder
Example #11
0
import logging

import azure.functions as func

from typing import List

from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

from config import DefaultConfig
from dao import PeopleDAO, UserDAO

# Application configuration (endpoint/key come from DefaultConfig).
CONFIG = DefaultConfig()

# Shared Face client used by the DAOs below.
face_client = FaceClient(
    CONFIG.COGNITIVE_SERVICES_ENDPOINT,
    CognitiveServicesCredentials(CONFIG.COGNITIVE_SERVICES_KEY))
people_dao = PeopleDAO(face_client=face_client, config=CONFIG)
user_dao = UserDAO(config=CONFIG)


def main(
    mytimer: func.TimerRequest
) -> None:  # should be called once a week ("0 30 9 * * 1")
    """Timer-triggered entry point: purge stale data and log the run time.

    NOTE(review): clean_data() is not defined in this fragment — it must
    come from elsewhere in the original module.
    """
    # BUGFIX: `datetime` was never imported in this snippet, so the
    # timestamp line raised NameError at runtime; import it locally.
    import datetime

    utc_timestamp = datetime.datetime.utcnow().replace(
        tzinfo=datetime.timezone.utc).isoformat()

    clean_data()

    logging.info('Python timer trigger function ran at %s', utc_timestamp)
def initFaceClient():
    """Return a FaceClient authenticated against the West Europe endpoint."""
    # SECURITY(review): key is hard-coded; move to configuration for real use.
    key = '5250cc4496a74ceca28563b13b083a01'
    endpoint = 'https://westeurope.api.cognitive.microsoft.com/'
    credentials = CognitiveServicesCredentials(key)
    return FaceClient(endpoint, credentials)
Example #13
0
def payment():
    """Flask view: identify the payer by face, then send an Ethereum payment.

    POST with an image file plus 'publickey', 'privatekey' and 'amount'
    form fields: the face is identified against the person group, the
    recipient address is looked up in the Students table, and a signed
    transaction is sent to the Ganache node.  GET (or POST without files)
    renders the index page.

    Returns a JSON payload: {"status": "success", "tx_hash": ...} or
    {"status": "error"} when the sender balance is insufficient.
    """
    if request.method == 'POST' and request.files:
        imagex = request.files["image"]
        imagex.save(os.path.join(app.config["IMAGE_UPLOADS"], imagex.filename))

        # SECURITY(review): hard-coded Face API key — move to configuration.
        KEY = '2698a28d0b3a47be9a0177011b4fca38'
        ENDPOINT = 'https://hackcovid.cognitiveservices.azure.com/'  # Replace with your regional Base URL

        face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
        test_image_array = glob.glob(
            os.path.join(app.config["IMAGE_UPLOADS"], imagex.filename))

        # BUGFIX: use a context manager so the image file handle is closed
        # (the original leaked the open file).
        with open(test_image_array[0], 'r+b') as image:
            image_name = os.path.basename(image.name)
            # Detect faces
            faces = face_client.face.detect_with_stream(image)
        face_ids = [face.face_id for face in faces]

        print(face_ids)
        # Identify faces against the person group.
        results = face_client.face.identify(face_ids, personGroupId)
        print('Identifying faces in {}'.format(image_name))
        if not results:
            print(
                'No person identified in the person group for faces from {}.'.
                format(image_name))
        res = 0
        for person in results:
            # NOTE(review): assumes candidates is non-empty; an unmatched
            # face would raise IndexError here (unchanged from original).
            print(
                'Person for face ID {} is identified in {} with a confidence of {}.'
                .format(person.face_id, image_name,
                        person.candidates[0].confidence
                        ))  # Get topmost confidence score
            print(person.candidates[0].person_id)
            res = person.candidates[0].person_id

        print(res)
        connect = sqlite3.connect("Face-DataBase")
        c = connect.cursor()
        c.execute("SELECT * FROM Students WHERE personID = ?", (res, ))
        row = c.fetchone()
        print(row[1] + " recognized")
        # BUGFIX: the original ran the identical SELECT a second time;
        # reuse the row already fetched.

        account_1 = request.form.get('publickey')
        account_2 = row[4]  # recipient address stored for this student

        private_key = request.form.get('privatekey')
        print('private_key', private_key)

        amount = float(request.form.get('amount'))
        print(amount)
        connect.commit()
        connect.close()

        ganache_url = "http://ec2-54-175-197-129.compute-1.amazonaws.com:8545"
        web3 = Web3(Web3.HTTPProvider(ganache_url))
        web3.eth.defaultAccount = account_1
        nonce = web3.eth.getTransactionCount(account_1)
        # Reject the payment when the sender cannot cover the amount (wei).
        if int(web3.eth.getBalance(account_1)) < int(amount * (10**18)):
            return jsonify({"status": "error"})
        print(nonce)
        tx = {
            'nonce': nonce,
            'to': account_2,
            'value': web3.toWei(amount, 'ether'),
            'gas': 2000000,
            'gasPrice': web3.toWei('50', 'gwei'),
        }

        signed_tx = web3.eth.account.signTransaction(tx, private_key)

        tx_hash = web3.eth.sendRawTransaction(signed_tx.rawTransaction)
        print(web3.toHex(tx_hash))
        return jsonify({"status": "success", "tx_hash": web3.toHex(tx_hash)})
    else:
        return render_template('index.html')
Example #14
0
def addinfo():
    """Flask view: identify a person from an uploaded photo and record their
    medicines/status on the blockchain contract.

    Expects a POST with an image file, form field 'type' == 2, plus
    'medicines' and 'status' fields.  The identified person's public key is
    looked up in the Students table and used as the transacting account.

    Returns {"status": "success"} on success, {"error": "none"} when no
    person could be identified.
    """
    if request.method == 'POST' and request.files and int(
            request.form.get('type')) == 2:
        imagex = request.files["image"]
        imagex.save(os.path.join(app.config["IMG_MEDICINES"], imagex.filename))

        # SECURITY(review): hard-coded Face API key — move to configuration.
        KEY = '2698a28d0b3a47be9a0177011b4fca38'
        ENDPOINT = 'https://hackcovid.cognitiveservices.azure.com/'

        face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
        test_image_array = glob.glob(
            os.path.join(app.config["IMG_MEDICINES"], imagex.filename))

        # BUGFIX: use a context manager so the image file handle is closed
        # (the original leaked the open file).
        with open(test_image_array[0], 'r+b') as image:
            image_name = os.path.basename(image.name)
            # Detect faces
            faces = face_client.face.detect_with_stream(image)
        face_ids = [face.face_id for face in faces]

        print(face_ids)
        # Identify faces only when at least one was detected.
        results = None
        if face_ids:
            results = face_client.face.identify(face_ids, personGroupId)
            print('Identifying faces in {}'.format(image_name))
        if not results:
            print(
                'No person identified in the person group for faces from {}.'.
                format(image_name))
            # BUGFIX: removed the unreachable bare `return` that followed
            # this statement in the original.
            return jsonify({"error": "none"})
        res = 0
        for person in results:
            # NOTE(review): assumes candidates is non-empty; an unmatched
            # face would raise IndexError here (unchanged from original).
            print(
                'Person for face ID {} is identified in {} with a confidence of {}.'
                .format(person.face_id, image_name,
                        person.candidates[0].confidence
                        ))  # Get topmost confidence score
            print(person.candidates[0].person_id)
            res = person.candidates[0].person_id

        print(res)
        connect = sqlite3.connect("Face-DataBase")
        c = connect.cursor()
        c.execute("SELECT * FROM Students WHERE personID = ?", (res, ))
        row = c.fetchone()
        print(row[1] + " recognized")
        # BUGFIX: the original ran the identical SELECT a second time;
        # reuse the row already fetched.
        publickey = row[4]
        # NOTE(review): the sqlite connection is never closed in this view
        # (unchanged from the original); consider closing it explicitly.

        ganache_url = "http://ec2-54-175-197-129.compute-1.amazonaws.com:8545"
        web3 = Web3(Web3.HTTPProvider(ganache_url))
        abi = json.loads(
            '[{"constant":false,"inputs":[{"name":"medicine","type":"string"}],"name":"addmedicines","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"stat","type":"int256"}],"name":"updatestatus","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"getmedicines","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getstatus","outputs":[{"name":"","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"medicines","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"status","outputs":[{"name":"","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"}]'
        )
        address = web3.toChecksumAddress(
            "0xC70003Ef71fdEa97A785183615B871A99cFB73b7")
        contract = web3.eth.contract(address=address, abi=abi)
        web3.eth.defaultAccount = publickey

        medicines = request.form.get('medicines')
        contract.functions.addmedicines(medicines).transact()
        print(request.form.get('status'))
        status = int(request.form.get('status'))
        contract.functions.updatestatus(status).transact()

        return jsonify({"status": 'success'})