def trainFace(name, identity):
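    """Register the face in the temporary image under (identity, name)
    and return the annotated image in a 'faceTrained' message."""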
    with open(tmpImgPath, 'rb') as f:
        npImg = comm.bytes2NpImg(f.read())

    if npImg is None:
        return MSG['EMPTY']

    npImg = scaledImg(npImg)

    # remember (or update) the display name for this identity
    FaceProcessor.persons[identity] = name

    align, net = FaceProcessor.align, FaceProcessor.net

    bb = align.getLargestFaceBoundingBox(npImg)
    if bb is None:
        content = comm.npImg2Bytes(npImg)
        msg = dict(MSG['PROCEEDED'])  # copy so the shared template is not mutated
        msg['detail'] = 'noFace'
        msg['content'] = content
        return msg

    p1 = (bb.left(), bb.top())
    p2 = (bb.right(), bb.bottom())
    cv2.rectangle(npImg, p1, p2, (0, 255, 0), 2)

    alignedFace = align.align(
        96, npImg, bb, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
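    # perceptual hash of the aligned face, used to skip images already in the training set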
    phash = str(imagehash.phash(Image.fromarray(alignedFace)))
    if phash not in FaceProcessor.images:
        rep = net.forward(alignedFace)
        FaceProcessor.images[phash] = {'identity': identity, 'rep': rep}

    content = comm.npImg2Bytes(npImg)
    msg = dict(MSG['PROCEEDED'])
    msg['detail'] = 'faceTrained'
    msg['content'] = content
    return msg
# Example #2
def rawFace():
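    """Return the scaled temporary image as a base64 data URI in a 'rawImg' message."""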
    npImg = getTmpImg()

    if npImg is None:
        return MSG['EMPTY']

    npImg = comm.scaledImg(npImg)

    # b64encode returns bytes; decode so the data URI concatenates as a plain str
    content = 'data:image/jpg;base64,' + \
        base64.b64encode(comm.npImg2Bytes(npImg)).decode('ascii')

    msg = dict(MSG['PROCEEDED'])  # copy so the shared template is not mutated
    msg['detail'] = 'rawImg'
    msg['content'] = content
    return msg
def rawFace():
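    """Variant that reads the temporary image file directly and returns its encoded bytes in a 'rawImg' message."""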
    with open(tmpImgPath, 'rb') as f:
        npImg = comm.bytes2NpImg(f.read())

    if npImg is None:
        return MSG['EMPTY']

    npImg = scaledImg(npImg)

    content = comm.npImg2Bytes(npImg)
    msg = dict(MSG['PROCEEDED'])  # copy so the shared template is not mutated
    msg['detail'] = 'rawImg'
    msg['content'] = content
    return msg
def detectFace():
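    """Mark every face dlib finds in the temporary image and report whether any face was detected."""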
    with open(tmpImgPath, 'rb') as f:
        npImg = comm.bytes2NpImg(f.read())

    if npImg is None:
        return MSG['EMPTY']

    npImg = scaledImg(npImg)
    hasFace = FaceDetector.markAllFacesByDlib(npImg)

    content = comm.npImg2Bytes(npImg)
    msg = dict(MSG['PROCEEDED'])  # copy so the shared template is not mutated
    msg['detail'] = 'faceDetected' if hasFace else 'noFace'
    msg['content'] = content
    return msg
def recognizeFace():
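    """Identify the largest face in the temporary image with the trained classifier
    and return the labelled, annotated image."""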
    #    global recognizerInited
    #    if not recognizerInited:
    #        updateRecognizer()
    #        recognizerInited=True

    le, clf, persons, align, net = (FaceRecognizer.le, FaceRecognizer.clf,
                                    FaceProcessor.persons, FaceRecognizer.align,
                                    FaceRecognizer.net)

    if clf is None:
        return MSG['EMPTY']

    with open(tmpImgPath, 'rb') as f:
        npImg = comm.bytes2NpImg(f.read())

    if npImg is None:
        return MSG['EMPTY']

    npImg = scaledImg(npImg)
    bb = align.getLargestFaceBoundingBox(npImg)
    if bb is None:
        content = comm.npImg2Bytes(npImg)
        msg = dict(MSG['PROCEEDED'])  # copy so the shared template is not mutated
        msg['detail'] = 'noFace'
        msg['content'] = content
        return msg

    alignedFace = align.align(
        96, npImg, bb, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    rep = net.forward(alignedFace)
    rep = rep.reshape(1, -1)  # sklearn classifiers expect a 2-D array of samples
    predictions = clf.predict_proba(rep).ravel()
    maxI = np.argmax(predictions)
    confidence = predictions[maxI]
    identity, name = -1, 'unknown'
    if confidence >= 0.7:  # only accept sufficiently confident predictions
        identity = le.inverse_transform(maxI)
        name = persons[identity]


#        phash = str(imagehash.phash(Image.fromarray(alignedFace)))
#        if phash not in FaceProcessor.images:
#            rep = net.forward(alignedFace)
#            FaceProcessor.images[phash] = {
#                'identity' : identity,
#                'rep' : rep
#            }

    p1 = (bb.left(), bb.top())
    p2 = (bb.right(), bb.bottom())
    cv2.rectangle(npImg, p1, p2, (0, 255, 0), 2)
    #    for p in openface.AlignDlib.OUTER_EYES_AND_NOSE:
    #        cv2.circle(npImg, center=landmarks[p], radius=3, color=(0, 255, 0), thickness=-1)
    cv2.putText(npImg,
                str(identity) + ':' + name, (bb.left(), bb.top() - 5),
                cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=0.6,
                color=(0, 255, 0),
                thickness=2)

    content = comm.npImg2Bytes(npImg)
    msg = dict(MSG['PROCEEDED'])
    msg['detail'] = 'faceRecognized'
    msg['content'] = content
    return msg
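
# Note: every handler above returns entries from a shared MSG table of response
# templates. A minimal, hypothetical sketch of that table (inferred from the
# commented-out {"type": "proceeded", ...} dicts above; the actual module may
# define it differently):
#
#     MSG = {
#         'EMPTY': {'type': 'empty'},
#         'PROCEEDED': {'type': 'proceeded'},
#     }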