def detect_by_conditions(video, conditions, face_api=True):
    """Walk through the video and collect one frame per condition, in order.

    Returns the list of matching frames, or None if the video ends before
    every condition has been satisfied.
    """
    cur = 0
    faces_images = []

    cap = cv2.VideoCapture(video)
    success, frame = cap.read()
    count = 0

    while success:
        # Analyse only every third frame to limit the number of Face API calls.
        if count % 3 == 0:
            frame = imutils.resize(frame, width=WIDTH)

            # 0 is used as a sentinel response when the Face API is disabled.
            face_api_response = detect(frame) if face_api else 0
            if face_api_response is not None and conditions[cur].check_face(frame, face_api_response):
                # print_error is expected to abort execution.
                if face_api_response != 0 and check_same_person(face_api_response['faceId']):
                    print_error('The same person already exists')

                faces_images.append(frame)
                cur += 1

                if cur == len(conditions):
                    return faces_images

        count += 1
        success, frame = cap.read()

    return None
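A minimal, hypothetical usage sketch for detect_by_conditions follows. The SmileCondition class, the video path and the smile-attribute check are illustrative assumptions only; the real condition objects, as well as WIDTH, detect, check_same_person and print_error, come from the surrounding module.

# Hypothetical condition object: anything exposing check_face(frame, response).
class SmileCondition:
    def check_face(self, frame, face_api_response):
        # Assumes the module's detect() returns a Face API face dict with
        # 'faceAttributes' when requested; 0 means the API was disabled.
        if face_api_response == 0:
            return False
        return face_api_response.get('faceAttributes', {}).get('smile', 0) > 0.5

frames = detect_by_conditions('input.mp4', [SmileCondition()])
if frames is None:
    print('The video did not satisfy all conditions')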
Example #2
def handle_photo(bot, update):
    """Telegram handler: recognise the football player on a received photo."""
    try:
        # Download the photo sent to the bot into the local archive folder.
        img_path = os.path.join('archive',
                                'img{0}_1.png'.format(str(update.message.chat_id)))
        new_file = bot.getFile(update.message['photo'][1]['file_id'])
        new_file.download(img_path)

        # Drop the alpha channel before running the detector.
        img = io.imread(img_path)
        img = img[:, :, :3].copy()
        char = api.detect(img)[0]

        # "Player - ... / National team - ... / Confidence - ..."
        bot.sendMessage(
            chat_id=update.message.chat_id,
            text='Футболист - {0}\nСборная - {1}\nТочность - {2}'.format(
                char['name'].split('.')[0], char['country'], char['mn']))
        # Reply with the reference photo of the recognised player.
        bot.sendPhoto(chat_id=update.message.chat_id,
                      photo=open(
                          os.path.join('teams', char['country'],
                                       '_'.join(char['name'].split(" "))),
                          'rb'))
    except Exception as e:
        print(e)
        # "An error occurred. Make sure the face is clearly visible in the photo."
        bot.sendMessage(
            chat_id=update.message.chat_id,
            text="Произошла ошибка. Убедитесь, что на фотографии лицо видно четко.")
def simple_identify(video):
    """Identify the person in the video from five detected face ids."""
    cap = cv2.VideoCapture(video)
    success, frame = cap.read()
    count = 0

    faces_ids = []

    while success:
        # Analyse only every fifth frame to limit the number of Face API calls.
        if count % 5 == 0:
            frame = imutils.resize(frame, width=WIDTH)

            face_api_response = detect(frame)

            if face_api_response is not None:
                faces_ids.append(face_api_response['faceId'])

                if len(faces_ids) == 5:
                    break

        count += 1
        success, frame = cap.read()

    # print_error is expected to abort execution.
    if len(faces_ids) < 5:
        print_error('The video does not follow requirements')

    person_id = identify_person_id(faces_ids)

    if person_id is None:
        print_error('The person was not found')

    return person_id
def detect_all(video):
    """Collect five frames with a detected face, rejecting already known people."""
    faces_images = []

    cap = cv2.VideoCapture(video)
    success, frame = cap.read()
    count = 0

    while success:
        # Analyse only every fifth frame to limit the number of Face API calls.
        if count % 5 == 0:
            frame = imutils.resize(frame, width=WIDTH)

            face_api_response = detect(frame)

            if face_api_response is not None:
                # print_error is expected to abort execution.
                if check_same_person(face_api_response['faceId']):
                    print_error('The same person already exists')

                faces_images.append(frame)

                if len(faces_images) == 5:
                    return faces_images

        count += 1
        success, frame = cap.read()

    return None
Example #5
def detect_all(video):
    """Collect five frames with a detected face (variant without the duplicate-person check)."""
    faces_images = []

    cap = cv2.VideoCapture(video)
    success, frame = cap.read()
    count = 0

    while success:
        # Analyse only every fifth frame to limit the number of Face API calls.
        if count % 5 == 0:
            frame = imutils.resize(frame, width=WIDTH)

            face_api_response = detect(frame)

            if face_api_response is not None:
                faces_images.append(frame)

                if len(faces_images) == 5:
                    return faces_images

        count += 1
        success, frame = cap.read()

    return None
def detect_by_conditions_in_any_order(video, conditions, face_api=True):
    """Collect one frame per condition, accepting the conditions in any order."""
    faces_images = []
    remaining = list(conditions)  # conditions that have not been satisfied yet

    cap = cv2.VideoCapture(video)
    success, frame = cap.read()

    while success:
        frame = imutils.resize(frame, width=WIDTH)
        # 0 is used as a sentinel response when the Face API is disabled.
        face_api_response = detect(frame) if face_api else 0
        if face_api_response is not None:
            for condition in remaining:
                if condition.check_face(frame, face_api_response):
                    faces_images.append(frame)
                    # Drop the satisfied condition so it cannot be counted twice.
                    remaining.remove(condition)
                    break

            if not remaining:
                return faces_images

        success, frame = cap.read()

    return None
def identify(video, actions):
    """Identify the person in the video, requiring the given actions in order."""
    cap = cv2.VideoCapture(video)
    success, frame = cap.read()

    actions = list(map(action_to_face_condition, actions))

    count = 0
    cur = 0

    faces_ids = []

    while success:
        # Analyse only every fifth frame to limit the number of Face API calls.
        if count % 5 == 0:
            frame = imutils.resize(frame, width=WIDTH)

            face_api_response = detect(frame)

            if face_api_response is not None and actions[cur].check(frame, face_api_response):
                faces_ids.append(face_api_response['faceId'])

                cur += 1
                if cur == len(actions):
                    break

        count += 1
        success, frame = cap.read()

    # print_error is expected to abort execution.
    if cur < len(actions):
        print_error('The video does not follow requirements')

    person_id = identify_person_id(faces_ids)

    if person_id is None:
        print_error('The person was not found')

    return person_id
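A hypothetical call to identify; the action labels below are placeholders, since the values accepted by action_to_face_condition are not shown in this snippet.

person_id = identify('check.mp4', ['turn_left', 'smile'])  # placeholder actions
print('Identified person id:', person_id)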
Example #8
        tY = tsize * desiredLeftEye
        M[0, 2] += (tX - eyesCenter[0])
        M[1, 2] += (tY - eyesCenter[1])

        (w, h) = (tsize, tsize)
        output = cv2.warpAffine(im, M, (w, h), flags=cv2.INTER_CUBIC)
        crop.append(output)
    return crop


# Open the webcam and run face detection on the stream.
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()  # read a frame from the camera
    frame = cv2.flip(frame, 1)
    res = detect(frame)
    # print('res', res, len(res))
    if len(res) > 0:

        # frame = puttext(frame, res)
        crop_image = face_align(frame, res)
        crop_image = np.array(crop_image)
        # print(crop_image.shape)
        # cv2.imshow('face', crop_image[0])
        # cv2.imshow('face:', crop_image)
        # frame = crop_image[0]
        n, w, h, c = crop_image.shape
        # for i in range(n-1):
        #     frame = np.concatenate([crop_image[i,:,:,:], crop_image[i+1,:,:,:]], axis=0)
        if n == 1:
            frame = crop_image[0]
Example #9
import api
import GetReady
import voicecontrol as voice

group_id = "test"

if __name__ == '__main__':
    # "Entering the face recognition system"
    print("-----------------进入人脸识别系统-------------")
    voice.player("进入人脸识别系统")
    image = GetReady.getIm()
    # print(s)

    #  image = api.get_file_content(s)
    imageType = "BASE64"

    user = api.detect(image, imageType)
    print(user)

    # "Their age:"
    print("他的年龄:", user['result']['face_list'][0]["age"])
    if user['result']['face_list'][0]["gender"]['type'] == "male":
        # "You look like a <age>-year-old gentleman"
        voice.player("看上去是一位" + str(user['result']['face_list'][0]["age"]) +
                     "岁的男士")
    else:
        # "You look like a <age>-year-old lady"
        voice.player("看上去是一位" + str(user['result']['face_list'][0]["age"]) +
                     "岁的女士")

    # "Registering your information...."
    print("正在注册你的信息....")
    voice.player("正在注册你的信息....")
    # "Your name:" / "Your name is:"
    print("你的名字:")
    voice.player("你的名字是:")
    feedback = voice.my_record(1737)