Code Example #1
import openface
import pandas as pd
import pickle
import argparse
from sklearn.svm import SVC
from sklearn.preprocessing import LabelEncoder

imgDim = 96

dlib_shape_predictor_path = '../models/dlib/shape_predictor_68_face_landmarks.dat'

# aligning face using openface which in turn uses dlib
fa_openface = openface.AlignDlib(dlib_shape_predictor_path)

# Torch Neural Net to get the face embeddings
face_encoder = openface.TorchNeuralNet('../models/openface/nn4.small2.v1.t7', imgDim)

# svm classifier model file
svm_classifier_filename = "./classifier.pkl"


def training(training_data):
    """
    Training our classifier (Linear SVC). Saving model using pickle.
    We need to have only one person/face per picture.
    :param training_data: labeled face images of the people to classify and recognize
    """

    # get the embeddings and labels to process further for SVM model training
    df = get_embeddings_label_dataframe(training_data)
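The snippet above is cut off right after the embeddings are collected. A minimal hedged sketch of how the rest of training() could continue: fit a LabelEncoder and a linear SVC on the embeddings, then pickle both. The 'label' column name and the classifier settings are assumptions, not the original project's code.

    # hedged continuation sketch (column name 'label' is assumed)
    labels = df['label'].values                    # person name for each image
    embeddings = df.drop('label', axis=1).values   # 128-D OpenFace embeddings

    le = LabelEncoder().fit(labels)                # map person names to integer ids
    clf = SVC(C=1, kernel='linear', probability=True)
    clf.fit(embeddings, le.transform(labels))

    # persist the encoder and classifier together, as later examples expect
    with open(svm_classifier_filename, 'wb') as f:
        pickle.dump((le, clf), f)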
Code Example #2
File: compare2.py Project: sivadonkada/EproctorX-1
import os
from sys import argv
import numpy as np
np.set_printoptions(precision=2)

import openface
import cv2
import database

fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '../../openface', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')

align = openface.AlignDlib(
    os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
net = openface.TorchNeuralNet(
    os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'), 96)

face_visible = -1


def getRep(img):
    #if args.verbose:
    #    print("Processing {}.".format(imgPath))
    #print "1*************************************"
    bgrImg = np.array(img)
    #print "2*******************************************"

    global face_visible

    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
Code Example #3
                        help='Path to dlib Face Predictor',
                        default=os.path.join(
                            dlibModelDir,
                            'shape_predictor_68_face_landmarks.dat'))

    parser.add_argument('--networkModel',
                        type=str,
                        help="Path to Torch network model.",
                        default=os.path.join(openfaceModelDir,
                                             'nn4.small2.v1.t7'))
    parser.add_argument('--imgDim',
                        type=int,
                        help="Default image dimension.",
                        default=96)
    parser.add_argument('--cuda', action='store_true')

    parser.add_argument(
        'workDir',
        type=str,
        help="The input work directory containing 'reps.csv' and 'labels.csv'. "
        "Obtained from aligning a directory with 'align-dlib' and getting the"
        " representations with 'batch-represent'.")

    args = parser.parse_args()

    align = openface.AlignDlib(args.dlibFacePredictor)
    net = openface.TorchNeuralNet(args.networkModel,
                                  imgDim=args.imgDim,
                                  cuda=args.cuda)
    train(args)
Code Example #4
openfaceDir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
modelDir = os.path.join(openfaceDir, 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')

exampleImages = os.path.join(openfaceDir, 'images', 'examples')
lfwSubset = os.path.join(openfaceDir, 'data', 'lfw-subset')

dlibFacePredictor = os.path.join(dlibModelDir,
                                 "shape_predictor_68_face_landmarks.dat")
model = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')
imgDim = 96

align = openface.AlignDlib(dlibFacePredictor)
net = openface.TorchNeuralNet(model, imgDim=imgDim)


def test_pipeline():
    imgPath = os.path.join(exampleImages, 'lennon-1.jpg')
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
    # assert np.isclose(norm(rgbImg), 11.1355)

    bb = align.getLargestFaceBoundingBox(rgbImg)
    assert bb.left() == 341
    assert bb.right() == 1006
    assert bb.top() == 193
    assert bb.bottom() == 859
Code Example #5
File: compare.py Project: crescent-igor/face-compare
                    default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim',
                    type=int,
                    help="Default image dimension.",
                    default=96)
parser.add_argument('--verbose', action='store_true')

args = parser.parse_args()

if args.verbose:
    print("Argument parsing and loading libraries took {} seconds.".format(
        time.time() - start))

start = time.time()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, args.imgDim)
if args.verbose:
    print("Loading the dlib and OpenFace models took {} seconds.".format(
        time.time() - start))


def getRep(imgPath):
    if args.verbose:
        print("Processing {}.".format(imgPath))
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

    if args.verbose:
        print("  + Original size: {}".format(rgbImg.shape))
Code Example #6
def face_augmentation_server():
    print 'starting up!'

    align = openface.AlignDlib(SHAPE_PREDICTOR_PATH)
    net = openface.TorchNeuralNet(FACE_NN_PATH)

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((HOST, PORT))
    sock.listen(1)
    print "server started"

    # signal to the lambda that the server is ready
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    count = 0
    not_connected = True
    while (not_connected):
        try:
            s.connect(('localhost', 10001))
            not_connected = False
        except:
            print 'waiting for host to come up', count
            if (count >= 30):
                print 'waited too long'
                sys.exit(-1)

            time.sleep(1)
            count += 1

    s.close()

    end = False
    while (not end):
        conn, addr = sock.accept()
        print 'connection made'

        print 'getting data'
        img_base64 = ''
        while True:
            data = conn.recv(MAX_BUFFER_SIZE)
            if (data[-1] == ':'):
                img_base64 += data[:-1]
                break

            img_base64 += data

        print 'checking end condition'
        poison_symbol = img_base64[0]
        img_base64 = img_base64[1:]
        if (poison_symbol == 'S'):
            end = True
            conn.close()
            sock.close()
            break

        # process image
        print 'decode image'
        bio = io.BytesIO(base64.b64decode(img_base64))
        compressed_img = np.fromstring(bio.read(), dtype=np.uint8)
        bgrImg = cv2.imdecode(compressed_img, cv2.IMREAD_COLOR)
        rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

        print 'augmenting image'
        augmented_images = augment_image(rgbImg)

        print 'getting face vectors'
        face_feature_vectors = []
        for rgbImg in augmented_images:
            try:
                print 'adding vector'
                face_feature_vector = get_face_vector(rgbImg, align, net)
                face_feature_vectors.append(face_feature_vector)
            except Exception as e:
                sys.stderr.write(str(e) + '\n')

        # send back resulting face feature vectors
        print 'sending back face vectors'
        output_csv = ''
        for vector in face_feature_vectors:
            output_csv += ','.join(map(str, vector)) + '\n'

        conn.sendall(output_csv + ':')

        # close connection
        print 'closing connection'
        conn.close()

    print 'done'
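For context on the wire format in the example above: each request is a single control character, then the base64-encoded image, terminated by ':', and the reply is CSV rows of face vectors, also terminated by ':'. Below is a minimal Python 2 client sketch under those assumptions; the 'C' marker is hypothetical (the server only checks for 'S' to stop), and host/port must match however HOST and PORT were configured.

import base64
import socket


def request_face_vectors(image_path, host, port, max_buffer_size=4096):
    # read the image, base64-encode it, prepend the control character, terminate with ':'
    with open(image_path, 'rb') as f:
        payload = 'C' + base64.b64encode(f.read()) + ':'

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    sock.sendall(payload)

    # the server terminates its CSV reply with ':'
    reply = ''
    while not reply.endswith(':'):
        reply += sock.recv(max_buffer_size)
    sock.close()

    rows = reply[:-1].strip().split('\n')
    return [[float(v) for v in row.split(',')] for row in rows if row]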
Code Example #7
import cv2
import openface

bgrImg = cv2.imread('test.jpg')
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

align = openface.AlignDlib('shape_predictor_68_face_landmarks.dat')
net = openface.TorchNeuralNet()

bb = align.getLargestFaceBoundingBox(rgbImg)
alignedFace = align.align(
    96, rgbImg, bb, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
rep = net.forward(alignedFace)

print rep
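The example above stops at printing the embedding. The usual follow-up, seen in the compare-style examples later in this list, is to measure the squared L2 distance between two 128-D representations, with smaller meaning more likely the same person. A small hedged sketch; the 0.99 cut-off is the value commonly quoted for nn4.small2.v1 in the OpenFace demos, not something this snippet defines.

import numpy as np


def same_person(rep1, rep2, threshold=0.99):
    # squared L2 distance between two OpenFace embeddings
    d = rep1 - rep2
    distance = np.dot(d, d)
    return distance, distance < threshold

# e.g. with two reps obtained exactly like `rep` above:
# distance, match = same_person(net.forward(face_a), net.forward(face_b))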
Code Example #8
import openface
import numpy as np
import math
import base64
from PIL import Image
import io
import json

fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
predict = os.path.join(dlibModelDir, 'shape_predictor_68_face_landmarks.dat')
torchmodel = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')
align = openface.AlignDlib(predict)
net = openface.TorchNeuralNet(torchmodel)
comparenet = openface.TorchNeuralNet(torchmodel, 96)
landmarkIndices = openface.AlignDlib.OUTER_EYES_AND_NOSE


class FACE:
    def angleBetweenVectorsDegrees(self, A, vertex, C):
        """Return the angle between two vectors in any dimension space,
        in degrees."""
        # Convert the points to numpy latitude/longitude radians space
        a = np.radians(np.array(A))
        vertexR = np.radians(np.array(vertex))
        c = np.radians(np.array(C))
        # Vectors in latitude/longitude space
        sideA = a - vertexR
        sideC = c - vertexR
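The angleBetweenVectorsDegrees method above is cut off after building the two side vectors. Based only on its docstring, a plausible hedged completion is the standard arccos of the normalized dot product; this continuation is an assumption, not the project's original code.

        # hedged continuation: angle at `vertex` between the two side vectors
        cosine_angle = np.dot(sideA, sideC) / (
            np.linalg.norm(sideA) * np.linalg.norm(sideC))
        # clip guards against floating-point overshoot outside [-1, 1]
        return np.degrees(np.arccos(np.clip(cosine_angle, -1.0, 1.0)))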
Code Example #9
File: test.py Project: yikuizhai/openface
import openface

from subprocess import Popen, PIPE

fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')

dlibFacePredictor = os.path.join(dlibModelDir,
                                 "shape_predictor_68_face_landmarks.dat")
networkModel = os.path.join(openfaceModelDir, 'nn4.v1.t7')
imgDim = 96

align = openface.AlignDlib(dlibFacePredictor)
net = openface.TorchNeuralNet(networkModel, imgDim=imgDim)


def test_pipeline():
    imgPath = os.path.join(fileDir, 'images', 'examples', 'lennon-1.jpg')
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
    assert np.isclose(norm(rgbImg), 11.1355)

    bb = align.getLargestFaceBoundingBox(rgbImg)
    assert bb.left() == 341
    assert bb.right() == 1006
    assert bb.top() == 193
    assert bb.bottom() == 859
Code Example #10
File: greety.py Project: presight/greety
        text_to_speech_function = espeak_speech
    elif speech_fun_name == 'marytts':
        text_to_speech_function = marytts_speech
    else:
        text_to_speech_function = None

    update_faces_skip_frames = config.getint('Performance', 'skip_frames')

    command_queue = []
    running_command = None
    played_welcome_messages = {}
    generated_image_id = 0
    align = openface.AlignDlib(face_predictor_file)
    session_id = str(uuid.uuid1())
    tracked_persons = []
    net = openface.TorchNeuralNet(torch_network_model_file,
                                  imgDim=face_image_dim)
    iteration = 0

    vc = cv2.VideoCapture(video_capture_device)

    with open(classifierFile, 'r') as f:
        (labels, classifier) = pickle.load(f)

    while True:
        _, img = vc.read()

        # Rotate the screen if the camera is tilted
        cols, rows, _ = img.shape

        #pdb.set_trace()
        if rotate_video > 0:
Code Example #11
def openfacelogreg():
    print("OpenFace LogisticRegression Experiment")
    net = openface.TorchNeuralNet(openfaceModelPath, 96, cuda=False)
    cls = LogisticRegression(C=1, multi_class='ovr')
    cache = os.path.join(args.workDir, 'openface.cpu.logreg.pkl')
    openfacelogregDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)
Code Example #12
File: main.py Project: 1048727525/finished
class myThread(QThread):

    cnn_face_detector = dlib.cnn_face_detection_model_v1(
        "./models/dlib/mmod_human_face_detector.dat")
    changePixmap = pyqtSignal(QImage)
    get_rejected_face = pyqtSignal(QImage)
    get_detected_face = pyqtSignal(QImage)
    INTERVAL = 10
    align = openface.AlignDlib(
        "./models/dlib/shape_predictor_68_face_landmarks.dat")
    net = openface.TorchNeuralNet("./models/openface/nn4.small2.v1.t7",
                                  imgDim=96,
                                  cuda=True)
    pause = QMutex()
    confidence = 0.8
    selected_people = []
    save_show_label = (0, 0)
    store_label = 0
    video_path = "./video/intern/1.mp4"
    #video_path = "./video/oldfriend/4.mkv"
    #video_path = "2.mp4"
    save_main_num = 0

    with open("./generated-embeddings/classifier.pkl", 'rb') as f:
        if sys.version_info[0] < 3:
            (le, clf) = pickle.load(f)
        else:
            (le, clf) = pickle.load(f, encoding='latin1')

    def align_picture(self, rect, rbgimg):
        print("size is {}".format(rbgimg.shape))
        alignedFace = self.align.align(
            96,
            rbgimg,
            rect,
            landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        return alignedFace  #:type(alignedFace) = <class 'numpy.ndarray'>

    def get_prediction(self, rect, rbgimg):
        start = time.time()
        alignedFace = self.align_picture(rect, rbgimg)
        rep = self.net.forward(alignedFace)
        print(
            "Neural network forward pass took {} seconds.".format(time.time() -
                                                                  start))
        rep = rep.reshape(1, -1)
        start = time.time()
        predictions = self.clf.predict_proba(rep).ravel()
        maxI = np.argmax(predictions)
        person = self.le.inverse_transform(maxI)
        confidence = predictions[maxI]
        print("Prediction took {} seconds.".format(time.time() - start))
        return person, confidence

    def get_faces(self, rgbImage):
        start_time = time.time()
        detected_faces = self.cnn_face_detector(rgbImage, 1)
        rects = dlib.rectangles()
        rects.extend([d.rect for d in detected_faces])
        print("Face detection spend {}s".format(time.time() - start_time))
        return rects  #:type = <class 'dlib.rectangles'>

    def run(self):
        #cap = cv.VideoCapture(0)
        cap = cv.VideoCapture(self.video_path)
        frame_timer1 = 0
        box_color = (255, 0, 0)
        myQPen = QPen()
        myQFont = QFont()
        myQFont.setPixelSize(20)
        print(self.save_show_label[0])
        if self.save_show_label[0] != 0:
            fps = cap.get(cv.CAP_PROP_FPS)
            size = (int(cap.get(cv.CAP_PROP_FRAME_WIDTH)),
                    int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)))
            vw = cv.VideoWriter("./result/" + "test.mp4",
                                cv.VideoWriter_fourcc(*'mp4v'), fps, size)
        while (cap.isOpened() == True):
            self.pause.lock()
            ret, frame = cap.read()
            if ret:
                rgbImage = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
                Qface = QImage(rgbImage.data, rgbImage.shape[1],
                               rgbImage.shape[0], QImage.Format_RGB888)
                #: process the image
                if frame_timer1 % self.INTERVAL == 0:
                    rects = self.get_faces(rgbImage)
                    color_list = []
                    person_name_list = []
                    for rect in rects:
                        person, confidence = self.get_prediction(
                            rect, rgbImage)
                        person_name = "unknown"
                        if ((person.decode() in self.selected_people)
                                == True) and confidence > self.confidence:
                            self.store_label = 1
                            person_name = person.decode()
                            box_color = (255, 0, 0)
                            color_list.append(box_color)
                            person_name_list.append(person_name)
                            Qdetected_face = Qface.copy(
                                rect.left(), rect.top(), rect.width(),
                                rect.height())
                            #: write the confidence text on the matched face
                            if self.save_show_label[1] == 1:
                                myQPainter = QPainter(Qdetected_face)
                                myQFont.setPixelSize(int(rect.width() / 5))
                                myQPen.setColor(Qt.red)
                                myQPainter.setPen(myQPen)
                                myQPainter.setFont(myQFont)
                                myQPainter.drawText(Qdetected_face.rect(),
                                                    Qt.AlignTop,
                                                    str(round(confidence, 3)))
                                myQPainter.end()
                                # emit the signal
                                self.get_detected_face.emit(Qdetected_face)
                            # draw a box around the face in the video
                            cv.rectangle(rgbImage, (rect.left(), rect.top()),
                                         (rect.right(), rect.bottom()),
                                         box_color, 2)
                            cv.putText(rgbImage, person_name,
                                       (rect.left(), rect.top()),
                                       cv.FONT_HERSHEY_PLAIN,
                                       rect.width() / 80, box_color, 2)

                        else:
                            self.store_label = 0
                            person_name = "unknown"
                            Qrejected_face = Qface.copy(
                                rect.left(), rect.top(), rect.width(),
                                rect.height())
                            box_color = (0, 255, 0)
                            color_list.append(box_color)
                            person_name_list.append(person_name)
                            #: write the confidence text on the rejected face
                            myQPainter = QPainter(Qrejected_face)
                            myQFont.setPixelSize(int(rect.width() / 5))
                            myQPen.setColor(Qt.green)
                            myQPainter.setPen(myQPen)
                            myQPainter.setFont(myQFont)
                            myQPainter.drawText(Qrejected_face.rect(),
                                                Qt.AlignTop,
                                                str(round(confidence, 3)))
                            myQPainter.end()
                            # emit the signal
                            self.get_rejected_face.emit(Qrejected_face)
                            # draw a box around the face in the video
                            cv.rectangle(rgbImage, (rect.left(), rect.top()),
                                         (rect.right(), rect.bottom()),
                                         box_color, 2)
                            cv.putText(rgbImage, person_name,
                                       (rect.left(), rect.top()),
                                       cv.FONT_HERSHEY_PLAIN,
                                       rect.width() / 80, box_color, 2)
                    if self.save_show_label[0] != 0:
                        vw.write(cv.cvtColor(rgbImage, cv.COLOR_BGR2RGB))
                    #:for test###
                    QrgbImage = QImage(rgbImage.data, rgbImage.shape[1],
                                       rgbImage.shape[0], QImage.Format_RGB888)
                    QrgbImage.save(
                        "./saved_image/main/{}.jpg".format(self.save_main_num),
                        "JPG", 100)
                    self.save_main_num = self.save_main_num + 1
                    #############

                else:
                    for i, rect in enumerate(rects):
                        cv.rectangle(rgbImage, (rect.left(), rect.top()),
                                     (rect.right(), rect.bottom()),
                                     color_list[i], 2)
                        cv.putText(rgbImage, person_name_list[i],
                                   (rect.left(), rect.top()),
                                   cv.FONT_HERSHEY_PLAIN,
                                   rect.width() / 80, color_list[i], 2)
                        pass
                    if self.save_show_label[0] == 2:
                        vw.write(cv.cvtColor(rgbImage, cv.COLOR_BGR2RGB))
                    elif self.save_show_label[0] == 1:
                        if self.store_label == 1:
                            vw.write(cv.cvtColor(rgbImage, cv.COLOR_BGR2RGB))
                if self.save_show_label[1] == 1:
                    QrgbImage = QImage(rgbImage.data, rgbImage.shape[1],
                                       rgbImage.shape[0], QImage.Format_RGB888)
                    self.changePixmap.emit(QrgbImage)
                frame_timer1 = (frame_timer1 + 1) % self.INTERVAL
                #time.sleep(0.01)  # control the video playback speed
            else:
                break
            self.pause.unlock()
Code Example #13
def getImages():

    parser = argparse.ArgumentParser()

    parser.add_argument('--dlibFacePredictor',
                        type=str,
                        help="Path to dlib's face predictor.",
                        default=os.path.join(
                            dlibModelDir,
                            "shape_predictor_68_face_landmarks.dat"))
    parser.add_argument('--networkModel',
                        type=str,
                        help="Path to Torch network model.",
                        default=os.path.join(openfaceModelDir,
                                             'nn4.small2.v1.t7'))
    parser.add_argument('--imgDim',
                        type=int,
                        help="Default image dimension.",
                        default=96)
    parser.add_argument('--cuda', action='store_true')
    parser.add_argument('--verbose', action='store_true')

    subparsers = parser.add_subparsers(dest='mode', help="Mode")
    trainParser = subparsers.add_parser('train',
                                        help="Train a new classifier.")
    trainParser.add_argument('--ldaDim', type=int, default=-1)
    trainParser.add_argument('--classifier',
                             type=str,
                             choices=[
                                 'LinearSvm', 'GridSearchSvm', 'GMM',
                                 'RadialSvm', 'DecisionTree', 'GaussianNB',
                                 'DBN'
                             ],
                             help='The type of classifier to use.',
                             default='LinearSvm')
    trainParser.add_argument(
        'workDir',
        type=str,
        help=
        "The input work directory containing 'reps.csv' and 'labels.csv'. Obtained from aligning a directory with 'align-dlib' and getting the representations with 'batch-represent'."
    )

    inferParser = subparsers.add_parser(
        'infer',
        help='Predict who an image contains from a trained classifier.')
    inferParser.add_argument(
        'classifierModel',
        type=str,
        help=
        'The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.'
    )
    inferParser.add_argument('imgs', type=str, nargs='+', help="Input image.")
    inferParser.add_argument('--multi',
                             help="Infer multiple faces in image",
                             action="store_true")

    args = parser.parse_args()
    if args.verbose:
        print("Argument parsing and import libraries took {} seconds.".format(
            time.time() - start))

    if args.mode == 'infer' and args.classifierModel.endswith(".t7"):
        raise Exception("""
Torch network model passed as the classification model,
which should be a Python pickle (.pkl)

See the documentation for the distinction between the Torch
network and classification models:

        http://cmusatyalab.github.io/openface/demo-3-classifier/
        http://cmusatyalab.github.io/openface/training-new-models/

Use `--networkModel` to set a non-standard Torch network model.""")
    start = time.time()

    align = openface.AlignDlib(args.dlibFacePredictor)
    net = openface.TorchNeuralNet(args.networkModel,
                                  imgDim=args.imgDim,
                                  cuda=args.cuda)

    if args.verbose:
        print("Loading the dlib and OpenFace models took {} seconds.".format(
            time.time() - start))
        start = time.time()

#    imgs = ['/none/sehwa/group-image-set/Brad_Pitt/000022.jpg']

#   args.imgs = imgs

    tagList = infer(align, net, args, args.multi)

    returnList = {}
    for tag in tagList:
        doc = col.find_one({"star": tag})
        rels = {}
        for key in doc['relationship'].keys():
            rels[key] = doc['relationship'][key]['relationship']
        sorted_rels = sorted(rels.items(), key=itemgetter(1), reverse=True)

        i = 0
        fileList = {}
        fileList['confidence'] = tagList[tag]
        for x, y in sorted_rels:
            if i > 10:
                continue
            else:
                imgList = []
                relation = {}
                fName = doc['relationship'][x]['imgs']
                f = open(fName, "r")
                while True:
                    line = f.readline()
                    line = line.rstrip()
                    if not line: break
                    imgList.append(line)
                relation['relationship'] = doc['relationship'][x][
                    'relationship']
                relation['imgs'] = imgList
                relation['order'] = i
                fileList[x] = relation

            i += 1
        returnList[tag] = fileList

    return (returnList)
Code Example #14
def main():
    global dlibObject, dumpSocket, torchNn, torchModelPath, imagePipeName, dlibModelPath
    try:
        opts, args = getopt.getopt(sys.argv[1:], "w:h:", [
            "frame-width=", "frame-height=", "labels=", "reps=", "input=",
            "output=", "preview=", "dlibmodel=", "torchmodel="
        ])
    except getopt.GetoptError as err:
        print str(err)
        usage()
        exit(2)
    frameWidth = 320
    frameHeight = 180
    pipeName = "/tmp/mtcamera"
    outputName = "/tmp/ice-annotations"
    repsFile = None
    labelsFile = None

    for o, a in opts:
        if o in ("-w", "--frame-width"):
            frameWidth = int(a)
        elif o in ("-h", "--frame-height"):
            frameHeight = int(a)
        elif o in ("--labels"):
            labelsFile = a
        elif o in ("--reps"):
            repsFile = a
        elif o in ("--input"):
            pipeName = a
        elif o in ("--output"):
            outputName = a
        elif o in ("--preview"):
            imagePipeName = a
        elif o in ("--dlibmodel"):
            dlibModelPath = a
        elif o in ("--torchmodel"):
            torchModelPath = a
        else:
            assert False, "unhandled option " + o

    if not repsFile or not labelsFile:
        print "please provide reps and labels file"
        usage()
        exit(2)

    if len(args) == 1:
        pipeName = args[0] + "." + str(frameWidth) + "x" + str(frameHeight)
    else:
        pipeName += "." + str(frameWidth) + "x" + str(frameHeight)

    s = Socket(SUB)
    s.connect("ipc://" + pipeName)
    s.set_string_option(SUB, SUB_SUBSCRIBE, '')
    s.set_string_option(SUB, SUB_SUBSCRIBE, '')

    dumpSocket = Socket(PUB)
    dumpSocket.connect("ipc://" + outputName)

    print " > reading frames " + str(frameWidth) + "x" + str(
        frameHeight) + " from " + pipeName
    print " > writing annotations to " + outputName
    print " > preview at " + imagePipeName + " (ffplay -f rawvideo -vcodec rawvideo -s " + str(
        frameWidth) + "x" + str(
            frameHeight) + " -pix_fmt bgr24 -i " + imagePipeName + ")"
    print " > loading dlib model from " + dlibModelPath
    print " > loading torch model from " + torchModelPath
    print " > reps file " + repsFile
    print " > labels file " + labelsFile
    print ""

    print(" > initializing OpenFace...")
    dlibObject = openface.AlignDlib(dlibModelPath)
    print(" > ...done.")

    print(" > initializing classifier...")
    initClassifier(labelsFile, repsFile)
    print(" > ...done.")

    print(" > initializing torch feature extractor...")
    torchNn = openface.TorchNeuralNet(torchModelPath, imgDim=96, cuda=True)
    print(" > ...done.")

    print(" > processing frames of size " + str(frameWidth) + "x" +
          str(frameHeight) + " from " + pipeName)
    processFrames(s, frameWidth, frameHeight)
Code Example #15
import sys, cv2, time, os
import openface, numpy

IMG_DIM = 96

modelDir = os.path.join('openface', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
dlibFacePredictor = os.path.join(dlibModelDir,
                                 "shape_predictor_68_face_landmarks.dat")
openfaceModelDir = os.path.join(modelDir, 'openface')
networkModel = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')

align = openface.AlignDlib(dlibFacePredictor)
net = openface.TorchNeuralNet(networkModel, IMG_DIM)


def dlib2sdl(rect):
    from pygame import Rect
    return Rect(rect.left(), rect.top(), rect.width(), rect.height())


def getRep(imgPath):
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

    start = time.time()
    bb = align.getLargestFaceBoundingBox(rgbImg)
    if bb is None:
        raise Exception("Unable to find a face: {}".format(imgPath))
Code Example #16
def main():
    parser = argparse.ArgumentParser()
    lfwDefault = os.path.expanduser(
        "~/openface/data/lfw/dlib.affine.sz:96.OuterEyesAndNose")
    parser.add_argument('--lfwAligned',
                        type=str,
                        default=lfwDefault,
                        help='Location of aligned LFW images')
    parser.add_argument('--networkModel',
                        type=str,
                        help="Path to Torch network model.",
                        default=os.path.join(openfaceModelDir,
                                             'nn4.small2.v1.t7'))
    parser.add_argument('--largeFont', action='store_true')
    parser.add_argument(
        'workDir',
        type=str,
        help='The work directory where intermediate files and results are kept.'
    )
    args = parser.parse_args()
    # print(args)

    if args.largeFont:
        font = {'family': 'normal', 'size': 20}
        mpl.rc('font', **font)

    mkdirP(args.workDir)

    print("Getting lfwPpl")
    lfwPplCache = os.path.join(args.workDir, 'lfwPpl.pkl')
    lfwPpl = cacheToFile(lfwPplCache)(getLfwPplSorted)(args.lfwAligned)

    print("Eigenfaces Experiment")
    cls = cv2.createEigenFaceRecognizer()
    cache = os.path.join(args.workDir, 'eigenFacesExp.pkl')
    eigenFacesDf = cacheToFile(cache)(opencvExp)(lfwPpl, cls)

    print("Fisherfaces Experiment")
    cls = cv2.createFisherFaceRecognizer()
    cache = os.path.join(args.workDir, 'fisherFacesExp.pkl')
    fishFacesDf = cacheToFile(cache)(opencvExp)(lfwPpl, cls)

    print("LBPH Experiment")
    cls = cv2.createLBPHFaceRecognizer()
    cache = os.path.join(args.workDir, 'lbphExp.pkl')
    lbphFacesDf = cacheToFile(cache)(opencvExp)(lfwPpl, cls)

    print("OpenFace CPU/SVM Experiment")
    net = openface.TorchNeuralNet(args.networkModel, 96, cuda=False)
    cls = SVC(kernel='linear', C=1)
    cache = os.path.join(args.workDir, 'openface.cpu.svm.pkl')
    openfaceCPUsvmDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)

    print("OpenFace GPU/SVM Experiment")
    net = openface.TorchNeuralNet(args.networkModel, 96, cuda=True)
    cache = os.path.join(args.workDir, 'openface.gpu.svm.pkl')
    openfaceGPUsvmDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)

    plotAccuracy(args.workDir, args.largeFont, eigenFacesDf, fishFacesDf,
                 lbphFacesDf, openfaceCPUsvmDf, openfaceGPUsvmDf)
    plotTrainingTime(args.workDir, args.largeFont, eigenFacesDf, fishFacesDf,
                     lbphFacesDf, openfaceCPUsvmDf, openfaceGPUsvmDf)
    plotPredictionTime(args.workDir, args.largeFont, eigenFacesDf, fishFacesDf,
                       lbphFacesDf, openfaceCPUsvmDf, openfaceGPUsvmDf)
Code Example #17
import cv2
import sys
import operator
import base64
import json
import pickle
import numpy as np
import openface
from flask import Flask, jsonify, request, abort

app = Flask(__name__)

print "Loading model"
predictor_model = "shape_predictor_68_face_landmarks.dat"
network_model = "nn4.small2.v1.t7"
face_aligner = openface.AlignDlib(predictor_model)
net = openface.TorchNeuralNet(network_model, 96, cuda='store_true')

model_path = 'kelas/' + 'MBD-F' + '/'
model_file = model_path + 'model.pkl'
model = pickle.load(open(model_file, 'rb'))

print "Ready!"


@app.route('/predict', methods=['POST'])
def ApiCall():
    print "starting..."
    try:
        img = cv2.imdecode(
            np.fromstring(base64.b64decode(request.values['imagefile']),
                          np.uint8), cv2.IMREAD_COLOR)
Code Example #18
SERVER_IMG_DIM = 80
SERVER_CUDA = False

SERVER_MODE = "Stateful"
SERVER_DLIB_FACEPREDICTOR = os.path.join(
    fileDir, "FacePredictor",
    "shape_predictor_68_face_landmarks.dat")  # Path to dlib's face predictor
SERVER_OPENFACE_MODEL = os.path.join(
    fileDir, "Openface", "nn4.small2.v1.t7")  # Opencface torch net model

SERVER_PRETRAINED = os.path.join(fileDir, "Pretrained", "classifier.pkl")
SERVER_MULT_FACE_INFER = True

align = openface.AlignDlib(SERVER_DLIB_FACEPREDICTOR)

# Output folder for performance measure
SERVER_PROFILE_ENABLE = True
SERVER_PROFILE_DIR = os.path.join(fileDir, 'Profile')

# Parallel computing optimization
SERVER_FACE_SEARCH_OPTIMIZE = True
SERVER_FACE_SEARCH_PADDING = 0.5

SERVER_USE_PYTORCH = False
if SERVER_USE_PYTORCH:
    from OpenFacePytorch.loadOpenFace import prepareOpenFace
    net = prepareOpenFace(useCuda=False).eval()
else:
    net = openface.TorchNeuralNet(SERVER_OPENFACE_MODEL,
                                  imgDim=SERVER_IMG_DIM,
                                  cuda=SERVER_CUDA)
Code Example #19
batch_size = 32
test_size = 100
num_class_per_group = 46
num_epoch = 30

# Network Parameters
g_fc_layer1_dim = 1024
g_fc_layer2_dim = 512  # Final representation
g_fc_layer3_dim = 128

g_dense_block_layers = 4
g_dense_block_depth = 128

dlibDetector = dlib.get_frontal_face_detector()
align = openface.AlignDlib('openface/models/dlib/shape_predictor_68_face_landmarks.dat')
triplet = openface.TorchNeuralNet('openface/models/openface/nn4.small2.v1.t7', imgDim=96, cuda=True)


def get_center_loss(features, labels):
    with tf.variable_scope('center', reuse=tf.AUTO_REUSE):
        centers = tf.get_variable('centers')

    len_features = features.get_shape()[1]

    labels = tf.reshape(labels, [-1])
    centers_batch = tf.gather(centers, labels)

    loss = tf.reduce_mean(tf.reduce_sum((features - centers_batch)**2, [1]))

    return loss
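get_center_loss() above fetches a 'centers' variable by name and therefore assumes it was created earlier. A hedged TF1-style sketch of that setup is below; the feature width must match features.get_shape()[1], and using g_fc_layer3_dim here is only a guess based on the constants defined above.

import tensorflow as tf

# hedged sketch: create the 'centers' variable that get_center_loss() expects to exist
with tf.variable_scope('center', reuse=tf.AUTO_REUSE):
    centers = tf.get_variable('centers',
                              shape=[num_class_per_group, g_fc_layer3_dim],
                              dtype=tf.float32,
                              initializer=tf.zeros_initializer(),
                              trainable=False)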
Code Example #20
class Arg:
    def __init__(self,
                 dlibFacePredictor=os.path.join(
                     dlibModelDir, "shape_predictor_68_face_landmarks.dat"),
                 networkModel=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'),
                 imgDim=96, captureDevice=0, width=320, height=240,
                 threshold=0.5, classifierModel=SVC(kernel='rbf', C=5),
                 cuda=False):
        self.dlibFacePredictor = dlibFacePredictor
        self.networkModel = networkModel
        self.imgDim = imgDim
        self.captureDevice = captureDevice
        self.width = width
        self.height = height
        self.threshold = threshold
        self.classifierModel = classifierModel
        self.cuda = cuda

'''
    getRep returns the feature vector of each detected face
'''
def getRep(bgrImg, net):
    start = time.time()
    if bgrImg is None:
        raise Exception("Unable to load image/frame")

    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

    

    start = time.time()

    # Get the largest face bounding box
    # bb = align.getLargestFaceBoundingBox(rgbImg) #Bounding box

    # Get all bounding boxes
    bb = align.getAllFaceBoundingBoxes(rgbImg)

    if bb is None:
        # raise Exception("Unable to find a face: {}".format(imgPath))
        return None
    start = time.time()

    alignedFaces = []
    for box in bb:
        alignedFaces.append(
            align.align(
                args.imgDim,
                rgbImg,
                box,
                landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE))
    if alignedFaces is None:
        raise Exception("Unable to align the frame")
    start = time.time()

    reps = []
    for alignedFace in alignedFaces:
        reps.append(net.forward(alignedFace))
    # print reps
    return reps

'''
    infer() returns the predicted people in the frame as well as
    the corresponding information (e.g. a summary of shared posts and the similarity between them and you) stored on the server.
'''
def infer(img, args):
    #assume all attendee profile pictures have been downloaded into ./attendee/, with attendee id being file name
    image_list = []
    id_list = []
    for filename in glob.glob('attendee/*.png'):  # assuming png profile pictures
        im=Image.open(filename)
        id_list.append(filename)
        image_list.append(im)


    net = openface.TorchNeuralNet(args.networkModel, args.imgDim)
    reps = getRep(img, net) #return the detected and aligned faces in the video frame
    persons = []
    infos = []
    similarities = []
    for rep in reps:
        try:
            rep = rep.reshape(1, -1)
        except:
            print "No Face detected"
            return (None, None, None)
        start = time.time()

        distances = []
        for attendee_img in image_list:
            d = rep-getRep(attendee_img, net)
            distances.append(np.dot(d,d))
        # print predictions
        minI = np.argmin(distances)  # index of the smallest distance, i.e. the closest attendee
        attendee_id = id_list[minI]
        url = serverAttendeeRoot + "/" + attendee_id
        r = requests.get(url)
        person = r.json()['name']
        info = r.json()['info']
        similarity = r.json()['similarity']
        persons.append(person)
        infos.append(info)
        similarities.append(similarity)
        
    return (persons, infos,similarities)


if __name__ == '__main__':
    


    args = Arg()

    align = openface.AlignDlib(args.dlibFacePredictor)
    net = openface.TorchNeuralNet(
        args.networkModel,
        imgDim=args.imgDim, cuda = args.cuda)

    # Capture device. Usually 0 will be webcam and 1 will be usb cam.
    video_capture = cv2.VideoCapture(args.captureDevice)
    video_capture.set(3, args.width) 
    video_capture.set(4, args.height) 


    while True:
        ret, frame = video_capture.read()
        persons, infos, similarities = infer(frame, args)
        if persons == None: continue
        for i, value in enumerate(similarities):
            #if the similarities between you and this attendee is greater than 0.7, mark green
            if similarities[i] > 0.7: 
                cv2.putText(frame, "Name: {} Info: {}".format(person, info),
                            (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
            #otherwise, mark white
            else:
                cv2.putText(frame, "Name: {} Info: {}".format(person, info),
                            (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            cv2.imshow('', frame)
        # quit the program on the press of key 'q'
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Code Example #21
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--lfwDlibAligned', type=str,
                        help='Location of Dlib aligned LFW images')
    parser.add_argument('--lfwMtcnnAligned', type=str,
                    help='Location of MTCNN aligned LFW images')
    parser.add_argument('--largeFont', action='store_true')
    parser.add_argument('workDir', type=str,
                        help='The work directory where intermediate files and results are kept.')
    args = parser.parse_args()
    # print(args)

    if args.largeFont:
        font = {'family': 'normal', 'size': 20}
        mpl.rc('font', **font)

    mkdirP(args.workDir)

    print("Getting lfwPpl")
    lfwPplCache = os.path.join(args.workDir, 'lfwPpl.pkl')
    lfwPpl = cacheToFile(lfwPplCache)(getLfwPplSorted)(args.lfwDlibAligned)

    print("Getting lfwMtcnnPpl")
    lfwMtcnnPplCache = os.path.join(args.workDir, 'lfwMtcnnPpl.pkl')
    lfwMtcnnPpl = cacheToFile(lfwMtcnnPplCache)(getLfwPplSorted)(args.lfwMtcnnAligned)


    print("OpenFace SVM Experiment")
    net = openface.TorchNeuralNet(openfaceModelPath, 96, cuda=False)
    cls = SVC(C=1, kernel='linear')
    cache = os.path.join(args.workDir, 'openface.cpu.svm.pkl')
    openfaceCPUsvmDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)

    print("OpenFace LinearSVC Experiment")
    net = openface.TorchNeuralNet(openfaceModelPath, 96, cuda=False)
    cls = LinearSVC(C=1, multi_class='ovr')
    cache = os.path.join(args.workDir, 'openface.cpu.linearsvm.pkl')
    openfaceCPUlinearsvmDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)

    print("OpenFace GaussianNB Experiment")
    net = openface.TorchNeuralNet(openfaceModelPath, 96, cuda=False)
    cls = GaussianNB()
    cache = os.path.join(args.workDir, 'openface.cpu.gaussiannb.pkl')
    openfacegaussiannbDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)

    print("OpenFace DecisionTree Experiment")
    net = openface.TorchNeuralNet(openfaceModelPath, 96, cuda=False)
    cls = DecisionTreeClassifier(max_depth=20)
    cache = os.path.join(args.workDir, 'openface.cpu.dectree.pkl')
    openfacedectreeDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)

    print("OpenFace DecisionTree Experiment")
    net = openface.TorchNeuralNet(openfaceModelPath, 96, cuda=False)
    cls = LogisticRegression(C=1, multi_class ='ovr')
    cache = os.path.join(args.workDir, 'openface.cpu.logreg.pkl')
    openfacelogregDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)


    print("Facenet SVM Experiment")
    cls = SVC(C=1, kernel='linear')
    cache = os.path.join(args.workDir, 'facenet.svm.pkl')
    facenetsvmDf = cacheToFile(cache)(facenetExp)(lfwMtcnnPpl, facenetModelDir, cls)

    print("Facenet LinearSVC Experiment")
    cls = LinearSVC(C=1, multi_class='ovr')
    cache = os.path.join(args.workDir, 'facenet.linearsvm.pkl')
    facenetlinearsvmDf = cacheToFile(cache)(facenetExp)(lfwMtcnnPpl, facenetModelDir, cls)

    print("Facenet GaussianNB Experiment")
    cls = GaussianNB()
    cache = os.path.join(args.workDir, 'facenet.gaussiannb.pkl')
    facenetgaussiannbDf = cacheToFile(cache)(facenetExp)(lfwMtcnnPpl, facenetModelDir, cls)

    print("Facenet DecisionTree Experiment")
    cls = DecisionTreeClassifier(max_depth=20)
    cache = os.path.join(args.workDir, 'facenet.dectree.pkl')
    facenetdectreeDf = cacheToFile(cache)(facenetExp)(lfwMtcnnPpl, facenetModelDir, cls)

    print("Facenet LogisticRegression Experiment")
    cls = LogisticRegression(C=1, multi_class ='ovr')
    cache = os.path.join(args.workDir, 'facenet.logreg.pkl')
    facenetlogregDf = cacheToFile(cache)(facenetExp)(lfwMtcnnPpl, facenetModelDir, cls)

    plotAccuracy(args.workDir, args.largeFont,
                    openfaceCPUsvmDf, openfaceCPUlinearsvmDf, openfacegaussiannbDf,
                    openfacedectreeDf, openfacelogregDf, facenetsvmDf, facenetlinearsvmDf, facenetgaussiannbDf, facenetdectreeDf, facenetlogregDf)
    plotTrainingTime(args.workDir, args.largeFont,
                    openfaceCPUsvmDf, openfaceCPUlinearsvmDf, openfacegaussiannbDf,
                    openfacedectreeDf, openfacelogregDf, facenetsvmDf, facenetlinearsvmDf, facenetgaussiannbDf, facenetdectreeDf, facenetlogregDf)
    plotPredictionTime(args.workDir, args.largeFont,
                    openfaceCPUsvmDf, openfaceCPUlinearsvmDf, openfacegaussiannbDf,
                    openfacedectreeDf, openfacelogregDf, facenetsvmDf, facenetlinearsvmDf, facenetgaussiannbDf, facenetdectreeDf, facenetlogregDf)
Code Example #22
    def __init__(self):

        self.training = True
        self.trainingEvent = threading.Event()
        self.trainingEvent.set()

        self.alarmState = 'Disarmed'  #disarmed, armed, triggered
        self.alarmTriggerd = False
        self.alerts = []
        self.cameras = []

        self.peopleDB = []

        self.camera_threads = []
        self.camera_facedetection_threads = []
        self.people_processing_threads = []
        self.svm = None

        self.video_frame1 = None
        self.video_frame2 = None
        self.video_frame3 = None

        self.fileDir = os.path.dirname(os.path.realpath(__file__))
        self.luaDir = os.path.join(self.fileDir, '..', 'batch-represent')
        self.modelDir = os.path.join(self.fileDir, '..', 'models')
        self.dlibModelDir = os.path.join(self.modelDir, 'dlib')
        self.openfaceModelDir = os.path.join(self.modelDir, 'openface')

        parser = argparse.ArgumentParser()
        parser.add_argument('--dlibFacePredictor',
                            type=str,
                            help="Path to dlib's face predictor.",
                            default=os.path.join(
                                self.dlibModelDir,
                                "shape_predictor_68_face_landmarks.dat"))
        parser.add_argument('--networkModel',
                            type=str,
                            help="Path to Torch network model.",
                            default=os.path.join(self.openfaceModelDir,
                                                 'nn4.small2.v1.t7'))
        parser.add_argument('--imgDim',
                            type=int,
                            help="Default image dimension.",
                            default=96)
        parser.add_argument('--cuda', action='store_true')
        parser.add_argument('--unknown',
                            type=bool,
                            default=False,
                            help='Try to predict unknown people')

        self.args = parser.parse_args()
        self.align = openface.AlignDlib(self.args.dlibFacePredictor)
        self.net = openface.TorchNeuralNet(self.args.networkModel,
                                           imgDim=self.args.imgDim,
                                           cuda=self.args.cuda)

        #////////////////////////////////////////////////////Initialization////////////////////////////////////////////////////

        #self.change_alarmState()
        #self.trigger_alarm()

        #self.trainClassifier()  # add faces to DB and train classifier

        #default IP cam
        #self.cameras.append(Camera.VideoCamera("rtsp://*****:*****@192.168.1.64/Streaming/Channels/2"))
        #self.cameras.append(Camera.VideoCamera("rtsp://*****:*****@192.168.1.64/Streaming/Channels/2"))
        #self.cameras.append(Camera.VideoCamera("rtsp://*****:*****@192.168.1.64/Streaming/Channels/2"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.48/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.48/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.48/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.37/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.37/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("debugging/iphone_distance1080pHD.m4v"))
        self.cameras.append(Camera.VideoCamera("debugging/Test.mov"))
        #self.cameras.append(Camera.VideoCamera("debugging/Test.mov"))
        #self.cameras.append(Camera.VideoCamera("debugging/rotationD.m4v"))
        #self.cameras.append(Camera.VideoCamera("debugging/example_01.mp4"))

        #self.change_alarmState()
        #self.trigger_alarm()
        self.getFaceDatabaseNames()
        #self.trainClassifier()  # add faces to DB and train classifier

        #processing frame threads- for detecting motion and face detection

        for i, cam in enumerate(self.cameras):
            self.proccesing_lock = threading.Lock()
            thread = threading.Thread(name='frame_process_thread_' + str(i),
                                      target=self.process_frame,
                                      args=(cam, ))
            thread.daemon = False
            self.camera_threads.append(thread)
            thread.start()

        #Threads for alignment and recognition

        # for i, cam in enumerate(self.cameras):
        #   #self.proccesing_lock = threading.Lock()
        #   thread = threading.Thread(name='face_process_thread_' + str(i),target=self.people_processing,args=(cam,))
        #   thread.daemon = False
        #   self.people_processing_threads.append(thread)
        #   thread.start()

        #Thread for alert processing
        self.alerts_lock = threading.Lock()
        thread = threading.Thread(name='alerts_process_thread_',
                                  target=self.alert_engine,
                                  args=())
        thread.daemon = False
        thread.start()
Code Example #23
#!/usr/bin/env python2

import openface

dlibFacePredictor = '/root/openface/openface/models/dlib/shape_predictor_68_face_landmarks.dat'
networkModel = '/root/openface/openface/models/openface/nn4.small2.v1.t7'

align = openface.AlignDlib(dlibFacePredictor)
net = openface.TorchNeuralNet(networkModel, 96)
Code Example #24
from sklearn.mixture import GMM
import openface
from celery.decorators import task
from firebase import firebase

np.set_printoptions(precision=2)

fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')

networkModel = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')

net = openface.TorchNeuralNet(
    networkModel,
    imgDim=96,  # 96 is kept as img Dimension
    cuda=False)

dlibFacePredictor = os.path.join(dlibModelDir,
                                 "shape_predictor_68_face_landmarks.dat")

align = openface.AlignDlib(dlibFacePredictor)

classifierModel = fileDir + '/../generated-embeddings/classifier.pkl'


def getRep(bgrImg):
    global net, align
    start = time.time()
    if bgrImg is None:
        raise Exception("Unable to load image/frame")
Code Example #25
File: swmpredict.py Project: Octoberr/openfacelearn
# coding:utf-8
import os
import openface
import cv2
import pickle
import numpy as np

fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
classifierDir = os.path.join(fileDir, '..', 'generated-embeddings')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
predict = os.path.join(dlibModelDir, 'shape_predictor_68_face_landmarks.dat')
torchmodel = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')
align = openface.AlignDlib(predict)
net = openface.TorchNeuralNet(torchmodel)
landmarkIndices = openface.AlignDlib.OUTER_EYES_AND_NOSE
predictdatabase = os.path.join(classifierDir, 'classifier.pkl')  # face database


# processing to get the face representations
def getRep(img):
    bgrImg = cv2.imread(img)
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
    bbs = align.getAllFaceBoundingBoxes(rgbImg)
    reps = []
    for bb in bbs:
        facelandmarks = align.findLandmarks(rgbImg, bb)
        alignedFace = align.align(96,
                                  rgbImg,
                                  bb,
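The align.align(...) call above is cut off mid-argument list. For reference, here is a self-contained hedged sketch of the same per-face pattern as it appears completed in other examples in this collection; the helper name is made up.

def represent_faces(rgbImg, align, net, landmarkIndices, img_dim=96):
    """Hedged sketch: align every detected face and return its 128-D embedding."""
    reps = []
    for bb in align.getAllFaceBoundingBoxes(rgbImg):
        alignedFace = align.align(img_dim, rgbImg, bb,
                                  landmarkIndices=landmarkIndices)
        if alignedFace is None:
            continue  # skip faces that could not be aligned
        reps.append(net.forward(alignedFace))
    return reps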
Code Example #26
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--lfwDlibAligned',
                        type=str,
                        help='Location of Dlib aligned LFW images')
    parser.add_argument('--lfwMtcnnAligned',
                        type=str,
                        help='Location of MTCNN aligned LFW images')
    parser.add_argument('--largeFont', action='store_true')
    parser.add_argument(
        'workDir',
        type=str,
        help='The work directory where intermediate files and results are kept.'
    )
    args = parser.parse_args()
    # print(args)

    if args.largeFont:
        font = {'family': 'normal', 'size': 20}
        mpl.rc('font', **font)

    mkdirP(args.workDir)

    print("Getting lfwPpl")
    lfwPplCache = os.path.join(args.workDir, 'lfwPpl.pkl')
    lfwPpl = cacheToFile(lfwPplCache)(getLfwPplSorted)(args.lfwDlibAligned)

    print("Eigenfaces Experiment")
    cls = cv2.createEigenFaceRecognizer()
    cache = os.path.join(args.workDir, 'eigenFacesExp.pkl')
    eigenFacesDf = cacheToFile(cache)(opencvExp)(lfwPpl, cls)

    print("Fisherfaces Experiment")
    cls = cv2.createFisherFaceRecognizer()
    cache = os.path.join(args.workDir, 'fisherFacesExp.pkl')
    fishFacesDf = cacheToFile(cache)(opencvExp)(lfwPpl, cls)

    print("LBPH Experiment")
    cls = cv2.createLBPHFaceRecognizer()
    cache = os.path.join(args.workDir, 'lbphExp.pkl')
    lbphFacesDf = cacheToFile(cache)(opencvExp)(lfwPpl, cls)

    print("OpenFace SVM Experiment")
    net = openface.TorchNeuralNet(openfaceModelPath, 96, cuda=False)
    cls = SVC(C=1, kernel='linear')
    cache = os.path.join(args.workDir, 'openface.cpu.svm.pkl')
    openfaceCPUsvmDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)

    print("OpenFace LinearSVC Experiment")
    net = openface.TorchNeuralNet(openfaceModelPath, 96, cuda=False)
    cls = LinearSVC(C=1, multi_class='ovr')
    cache = os.path.join(args.workDir, 'openface.cpu.linearsvm.pkl')
    openfaceCPUlinearsvmDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)

    #print("OpenFace GaussianNB Experiment")
    #net = openface.TorchNeuralNet(openfaceModelPath, 96, cuda=False)
    #cls = GaussianNB()
    #cache = os.path.join(args.workDir, 'openface.cpu.gaussiannb.pkl')
    #openfaceCPUgaussiannbDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)

    print("Getting lfwMtcnnPpl")
    lfwMtcnnPplCache = os.path.join(args.workDir, 'lfwMtcnnPpl.pkl')
    lfwMtcnnPpl = cacheToFile(lfwMtcnnPplCache)(getLfwPplSorted)(
        args.lfwMtcnnAligned)

    print("Facenet LinearSVC Experiment")
    cls = LinearSVC(C=1, multi_class='ovr')
    cache = os.path.join(args.workDir, 'facenet.linearsvm.pkl')
    facenetlinearsvmDf = cacheToFile(cache)(facenetExp)(lfwMtcnnPpl,
                                                        facenetModelDir, cls)

    #print("OpenFace GPU/SVM Experiment")
    #net = openface.TorchNeuralNet(args.networkModel, 96, cuda=True)
    #cache = os.path.join(args.workDir, 'openface.gpu.svm.pkl')
    #openfaceGPUsvmDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)

    # TODO add Facenet and other models, openbr?

    plotAccuracy(args.workDir, args.largeFont, eigenFacesDf, fishFacesDf,
                 lbphFacesDf, openfaceCPUsvmDf, openfaceCPUlinearsvmDf,
                 facenetlinearsvmDf)
    plotTrainingTime(args.workDir, args.largeFont, eigenFacesDf, fishFacesDf,
                     lbphFacesDf, openfaceCPUsvmDf, openfaceCPUlinearsvmDf,
                     facenetlinearsvmDf)
    plotPredictionTime(args.workDir, args.largeFont, eigenFacesDf, fishFacesDf,
                       lbphFacesDf, openfaceCPUsvmDf, openfaceCPUlinearsvmDf,
                       facenetlinearsvmDf)
Code Example #27
from img_histo import gray_histo, rgb_histo, yuv_histo, hsv_histo, abs_dist
from img_gist import gist
from kmeans import eculidean_dist, norm0_dist
from img_hog import hog2, hog3, hog_lsh_list, hog_histo
from img_sift import sift2, sift_histo
from lsh import LSH_hog, LSH_sift
from rerank import blending, ensembling
import cPickle
import openface

openfacedir = '/home/ubuntu/Documents/openface'
modelDir = osp.join(openfacedir, 'models/dlib',
                    "shape_predictor_68_face_landmarks.dat")
align = openface.AlignDlib(modelDir)
netDir = osp.join(openfacedir, 'models/openface', 'nn4.small2.v1.t7')
net = openface.TorchNeuralNet(netDir, imgDim=96, cuda=False)

upload_prefix = './static/upload/'
SETNAME = 'lfw_raw'

with open('../static/url.pkl') as fh:
    urldat = cPickle.load(fh)

app = Flask(__name__)
# db = SQLAlchemy(app)
UPLOAD_FOLDER = upload_prefix
# ALLOWED_EXTENSIONS = set(['pdf', 'png', 'jpg', 'jpeg', 'gif'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
api = Api(app)

Code Example #28
app = Flask(__name__, static_url_path="")

# global variables
brokerURL = ''
outputs = []
timer = None
lock = threading.Lock()
camera = None
cameraURL = ''
total_size = 0

threshold = 1.0
imageDemension = 96
align = openface.AlignDlib(
    '/root/openface/models/dlib/shape_predictor_68_face_landmarks.dat')
net = openface.TorchNeuralNet(
    '/root/openface/models/openface/nn4.small2.v1.t7', imageDemension)

saveLocation = ''
featuresOfTarget = None
targetedFeaturesIsSet = False


@app.errorhandler(400)
def not_found(error):
    return make_response(jsonify({'error': 'Bad request'}), 400)


@app.errorhandler(404)
def not_found(error):
    return make_response(jsonify({'error': 'Not found'}), 404)
Code Example #29
dlibFacePredictor = os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat")
torchNetworkModel = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')


labelsPath = os.path.join(generatedDir, 'labels.csv')
repsPath = os.path.join(generatedDir, 'reps.csv')


imgDim = 96
imgSize = 96
landmarks = 'outerEyesAndNose'
ldaDim = 1

align_pred = openface.AlignDlib(dlibFacePredictor)
net = openface.TorchNeuralNet(torchNetworkModel, imgDim=imgDim,
                              cuda=True)


def getRep(imgPath, args, multiple=False):
    start = time.time()
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image: {}\n".format(imgPath))

    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

    if args.verbose:
        print("  + Original size: {}".format(rgbImg.shape))
        print("Loading the image took {} seconds.".format(time.time() - start))

    start = time.time()
Code Example #30
File: doPredict.py Project: dwisyn/html
        rank += 1
        if key == nrp:
            prob = val
            break
    score = round(prob * 100, 2)
    if prob == -1:
        return "ERR: Face data of " + nrp + " are currently not registered (500)"

    if rank <= 5 and score > 80:
        result = "ACCEPTED,"

    else:
        result = "REJECTED,"

    return "%s %s with score %g  rank %g" % (result, nrp, score, rank)


if __name__ == '__main__':
    t = time.time()
    predictor_model = "shape_predictor_68_face_landmarks.dat"
    network_model = "nn4.small2.v1.t7"
    face_aligner = openface.AlignDlib(predictor_model)
    net = openface.TorchNeuralNet(network_model, dim)
    model = loadModel('model.pkl')
    nrp = sys.argv[1]
    imagePath = sys.argv[2]

    r = prediksiImg(imagePath, nrp, model, face_aligner, net)
    elapsed = time.time() - t
    print "%s (Time Elpased = %g)" % (r, elapsed)