Example #1
parser.add_argument('--dir', type=str, default=fileDir, help='video directory')
parser.add_argument('--num', type=int, default=1000, help='num of images')
parser.add_argument('--dim', type=int, default=96, help='alignment dimension')
args = parser.parse_args()

print(args)

videoCapture = cv2.VideoCapture(os.path.join(args.dir, '001.mp4'))

if not videoCapture.isOpened(): sys.exit('video not opened')

template = np.load(os.path.join(fileDir, 'template.npy'))
delaunay = scipy.spatial.Delaunay(template)

facePredictor = os.path.join(fileDir, 'shape_predictor_68_face_landmarks.dat')
alignDlib = openface.AlignDlib(facePredictor)
aligner = alignment.Alignment(args.dim, template, delaunay.simplices)  # don't shadow the alignment module

print('processing images...')

for index in range(args.num):

    ret, rawImage = videoCapture.read()

    if not ret: break
    
    boundingBox = alignDlib.getLargestFaceBoundingBox(rawImage)
    landmarks = alignDlib.findLandmarks(rawImage, boundingBox)

    alignedImage = aligner.align(rawImage, landmarks)
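As a hedged aside (not part of the original example): scipy.spatial.Delaunay triangulates the 2-D landmark template, and its .simplices attribute is an (n_triangles, 3) array of vertex indices that a piecewise-affine warp can consume. A minimal illustration with a toy template:

import numpy as np
import scipy.spatial

# toy 4-point template standing in for the 68-point landmark template
toy_template = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
toy_delaunay = scipy.spatial.Delaunay(toy_template)
print(toy_delaunay.simplices)  # rows of three vertex indices, one triangle each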
Example #2
                        dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
parser.add_argument('--networkModel',
                    type=str,
                    help="Path to Torch network model.",
                    default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim',
                    type=int,
                    help="Default image dimension.",
                    default=96)
parser.add_argument('--verbose', action='store_true')

parser.add_argument('--cuda', action='store_true')

args = parser.parse_args()

align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel,
                              imgDim=args.imgDim,
                              cuda=args.cuda)

redis_ready = False


def debug_print(msg):  # parameter renamed to avoid shadowing the built-in str
    if debug:
        print(msg)


def info_print(msg):
    if info:
        print(msg)
Example #3
def alignMain(args):
    openface.helper.mkdirP(args.outDir)

    check = args.name in os.listdir(os.path.normpath(args.outDir))
    print(check)
    sock.sendall((str(check) + "\n").encode())
    if not check:
        os.mkdir(os.path.join(args.outDir, args.name))
    else:
        shutil.rmtree(os.path.join(args.outDir, args.name))
        os.mkdir(os.path.join(args.outDir, args.name))

    print('directory created')
    sock.sendall('directory created\n'.encode())
    imgs = list(iterImgs(args.inputDir))
    sock.sendall("length\n".encode())
    sock.sendall((str(len(imgs) * 3) + "\n").encode())

    # Shuffle so multiple versions can be run at once.
    random.shuffle(imgs)

    landmarkMap = {
        'outerEyesAndNose': openface.AlignDlib.OUTER_EYES_AND_NOSE,
        'innerEyesAndBottomLip': openface.AlignDlib.INNER_EYES_AND_BOTTOM_LIP
    }
    if args.landmarks not in landmarkMap:
        raise Exception("Landmarks unrecognized: {}".format(args.landmarks))

    landmarkIndices = landmarkMap[args.landmarks]

    align = openface.AlignDlib(args.dlibFacePredictor)

    nFallbacks = 0
    cnt = 0
    for imgObject in imgs:
        print("=== {} ===".format(imgObject.path))
        s = os.path.normpath(args.inputDir) + '/' + imgObject.name
        img = cv2.imread(s)
        if img is None:
            sock.sendall("  + Unable to load.\n".encode())
            sock.send("processed\n".encode())
            continue  # skip this image; img.shape below would fail on None
        width = img.shape[1]
        height = img.shape[0]
        check = False
        if width > 600:
            width = 600
            check = True
        if height > 600:
            height = 600
            check = True
        if check:
            img = cv2.resize(img, (width, height),
                             interpolation=cv2.INTER_LINEAR)
        cv2.imwrite(
            os.path.normpath(args.inputDir) + '/1' + imgObject.name, img)
        im = Image.open(
            os.path.normpath(args.inputDir) + '/1' + imgObject.name)
        os.remove(os.path.normpath(args.inputDir) + '/1' + imgObject.name)
        contrast = ImageEnhance.Contrast(im)
        bright = ImageEnhance.Brightness(im)
        sock.sendall(("=== {} ===\n".format(imgObject.path)).encode())
        outDir = os.path.join(args.outDir, args.name)
        #outDir = os.path.join(args.outDir, imgObject.cls)
        openface.helper.mkdirP(outDir)
        outputPrefix = os.path.join(outDir,
                                    os.path.splitext(imgObject.name)[0])
        j = 0        # suffix counter for augmented output file names
        i = 0.7      # enhancement factor for contrast/brightness variants
        t = False    # set True once the aligned base image has been written
        for k in range(1, 3, 1):
            for l in range(1, 4):
                if k != 1 and l == 1:
                    continue
                elif t:
                    im = Image.open(s)
                    contrast = ImageEnhance.Contrast(im)
                    bright = ImageEnhance.Brightness(im)
                if k == 1:
                    imgName = outputPrefix + ".png"
                elif l == 2:
                    imgObject = contrast.enhance(i)
                    imgObject = np.array(imgObject)
                    imgName = outputPrefix + "_" + str(j) + ".png"
                elif l == 3:
                    imgObject = bright.enhance(i)
                    imgObject = np.array(imgObject)
                    imgName = outputPrefix + "_" + str(j + 1) + ".png"
                if k == 1:
                    rgb = imgObject.getRGB()
                    print(type(rgb))
                else:
                    rgb = imgObject
                cnt = cnt + 1
                sock.sendall(("cnt " + str(cnt) + "\n").encode())
                if rgb is None:
                    if args.verbose:
                        print("  + Unable to load.")
                        sock.sendall("  + Unable to load.\n".encode())
                        sock.send("processed\n".encode())
                    outRgb = None
                else:
                    outRgb = align.align(args.size,
                                         rgb,
                                         landmarkIndices=landmarkIndices,
                                         skipMulti=args.skipMulti)
                    if outRgb is None and args.verbose:
                        print("  + Unable to align." + " " + str(k) + " " +
                              str(l))
                        sock.sendall("  + Unable to align.\n".encode())
                        sock.send("processed\n".encode())
                if args.fallbackLfw and outRgb is None:
                    nFallbacks += 1
                    deepFunneled = "{}/{}.jpg".format(
                        os.path.join(args.fallbackLfw, imgObject.cls),
                        imgObject.name)
                    shutil.copy(
                        deepFunneled, "{}/{}.jpg".format(
                            os.path.join(args.outDir, imgObject.cls),
                            imgObject.name))

                if outRgb is not None:
                    if args.verbose:
                        print("  + Writing aligned file to disk.")
                        sock.sendall(
                            "  + Writing aligned file to disk.\n".encode())
                    outBgr = cv2.cvtColor(outRgb, cv2.COLOR_RGB2BGR)
                    cv2.imwrite(imgName, outBgr)
                    sock.sendall("processed\n".encode())
                    if k == 1:
                        x = outputPrefix + ".png"
                        s = x
                        t = True
                        sock.sendall("write to index\n".encode())
                        sock.sendall((x + '\n').encode())
                        break
            if k != 1:
                i += 0.3
                j += 2
        if args.fallbackLfw:
            print('nFallbacks:', nFallbacks)
Example #4
def alignMain(args):
    openface.helper.mkdirP(args.outputDir)
    print("alignMail call")
    
    imgs = list(iterImgs(args.inputDir))
    print('imgs : ')
    print(imgs)
    print("------------------------------")

    # Shuffle so multiple versions can be run at once.
    random.shuffle(imgs)

    landmarkMap = {
        'outerEyesAndNose': openface.AlignDlib.OUTER_EYES_AND_NOSE,
        'innerEyesAndBottomLip': openface.AlignDlib.INNER_EYES_AND_BOTTOM_LIP
    }
    if args.landmarks not in landmarkMap:
        raise Exception("Landmarks unrecognized: {}".format(args.landmarks))

    landmarkIndices = landmarkMap[args.landmarks]

    align = openface.AlignDlib(args.dlibFacePredictor)

    nFallbacks = 0
    for imgObject in imgs:
        print("=== {} ===".format(imgObject.path))
        outDir = os.path.join(args.outputDir, imgObject.cls)
        openface.helper.mkdirP(outDir)
        outputPrefix = os.path.join(outDir, imgObject.name)
        imgName = outputPrefix + ".png"

        if os.path.isfile(imgName):
            if args.verbose:
                print("  + Already found, skipping.")
        else:
            rgb = imgObject.getRGB()
            if rgb is None:
                if args.verbose:
                    print("  + Unable to load.")
                outRgb = None
            else:
                outRgb = align.align(args.size, rgb,
                                     landmarkIndices=landmarkIndices,
                                     skipMulti=args.skipMulti)
                if outRgb is None and args.verbose:
                    print("  + Unable to align.")

            if args.fallbackLfw and outRgb is None:
                nFallbacks += 1
                deepFunneled = "{}/{}.jpg".format(os.path.join(args.fallbackLfw,
                                                               imgObject.cls),
                                                  imgObject.name)
                shutil.copy(deepFunneled, "{}/{}.jpg".format(os.path.join(args.outputDir,
                                                                          imgObject.cls),
                                                             imgObject.name))

            if outRgb is not None:
                if args.verbose:
                    print("  + Writing aligned file to disk.")
                outBgr = cv2.cvtColor(outRgb, cv2.COLOR_RGB2BGR)
                cv2.imwrite(imgName, outBgr)

    if args.fallbackLfw:
        print('nFallbacks:', nFallbacks)
Example #5
    _thread.start_new_thread(networkServer.rpcServer, ())
    print("yo")
except Exception as e:
    print("error in rpc thread")
    logger.error(e, exc_info=True)

predictor_model = "models/shape_predictor_68_face_landmarks.dat"
face_recognition_model = 'models/dlib_face_recognition_resnet_model_v1.dat'
fName = "classifier.pkl"

#webcam number
cap = cv2.VideoCapture(0)
face_detector = dlib.get_frontal_face_detector()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
face_pose_predictor = dlib.shape_predictor(predictor_model)
face_aligner = openface.AlignDlib(predictor_model)

# face_name = "Adnan Sayed"
saveCountReset = False

outOfRecording = 40
count = 0

saveImgLoc = "../untitled/Image"


def saveCapturedImg(image):
    im = Image.fromarray(image)

    with open("data/configs.json") as df:
        data = json.load(df)
Example #6

dlibFacePredictor = os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat")
torchNetworkModel = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')


labelsPath = os.path.join(generatedDir, 'labels.csv')
repsPath = os.path.join(generatedDir, 'reps.csv')


imgDim = 96
imgSize = 96
landmarks = 'outerEyesAndNose'
ldaDim = 1

align_pred = openface.AlignDlib(dlibFacePredictor)
net = openface.TorchNeuralNet(torchNetworkModel, imgDim=imgDim,
                              cuda=True)


def getRep(imgPath, args, multiple=False):
    start = time.time()
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image: {}\n".format(imgPath))

    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

    if args.verbose:
        print("  + Original size: {}".format(rgbImg.shape))
        print("Loading the image took {} seconds.".format(time.time() - start))
Example #7
import openface
import cv2
import os

# some initial variable setup (!)
# TODO figure out what this is doing
modelDir = '/root/openface/models/'
dlibModelDir = os.path.join(modelDir, 'dlib')
align = openface.AlignDlib(
    os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
imgDim = 96
openfaceModelDir = os.path.join(modelDir, 'openface')
net = openface.TorchNeuralNet(
    os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'), imgDim)


def getRep(imgPath):

    # load some image
    bgrImg = cv2.imread(imgPath)

    # couldn't load image
    if bgrImg is None:
        return None

    # convert from OpenCV's BGR channel order to RGB
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

    # find the largest face bounding box
    bb = align.getLargestFaceBoundingBox(rgbImg)
Example #8

    def __init__(self):

        self.training = True
        self.trainingEvent = threading.Event()
        self.trainingEvent.set()

        self.alarmState = 'Disarmed'  #disarmed, armed, triggered
        self.alarmTriggerd = False
        self.alerts = []
        self.cameras = []

        self.peopleDB = []

        self.camera_threads = []
        self.camera_facedetection_threads = []
        self.people_processing_threads = []
        self.svm = None

        self.video_frame1 = None
        self.video_frame2 = None
        self.video_frame3 = None

        self.fileDir = os.path.dirname(os.path.realpath(__file__))
        self.luaDir = os.path.join(self.fileDir, '..', 'batch-represent')
        self.modelDir = os.path.join(self.fileDir, '..', 'models')
        self.dlibModelDir = os.path.join(self.modelDir, 'dlib')
        self.openfaceModelDir = os.path.join(self.modelDir, 'openface')

        parser = argparse.ArgumentParser()
        parser.add_argument('--dlibFacePredictor',
                            type=str,
                            help="Path to dlib's face predictor.",
                            default=os.path.join(
                                self.dlibModelDir,
                                "shape_predictor_68_face_landmarks.dat"))
        parser.add_argument('--networkModel',
                            type=str,
                            help="Path to Torch network model.",
                            default=os.path.join(self.openfaceModelDir,
                                                 'nn4.small2.v1.t7'))
        parser.add_argument('--imgDim',
                            type=int,
                            help="Default image dimension.",
                            default=96)
        parser.add_argument('--cuda', action='store_true')
        parser.add_argument('--unknown',
                            type=bool,
                            default=False,
                            help='Try to predict unknown people')

        self.args = parser.parse_args()
        self.align = openface.AlignDlib(self.args.dlibFacePredictor)
        self.net = openface.TorchNeuralNet(self.args.networkModel,
                                           imgDim=self.args.imgDim,
                                           cuda=self.args.cuda)

        #////////////////////////////////////////////////////Initialization////////////////////////////////////////////////////

        #self.change_alarmState()
        #self.trigger_alarm()

        #self.trainClassifier()  # add faces to DB and train classifier

        #default IP cam
        #self.cameras.append(Camera.VideoCamera("rtsp://*****:*****@192.168.1.64/Streaming/Channels/2"))
        #self.cameras.append(Camera.VideoCamera("rtsp://*****:*****@192.168.1.64/Streaming/Channels/2"))
        #self.cameras.append(Camera.VideoCamera("rtsp://*****:*****@192.168.1.64/Streaming/Channels/2"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.48/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.48/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.48/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.37/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.37/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("debugging/iphone_distance1080pHD.m4v"))
        self.cameras.append(Camera.VideoCamera("debugging/Test.mov"))
        #self.cameras.append(Camera.VideoCamera("debugging/Test.mov"))
        #self.cameras.append(Camera.VideoCamera("debugging/rotationD.m4v"))
        #self.cameras.append(Camera.VideoCamera("debugging/example_01.mp4"))

        #self.change_alarmState()
        #self.trigger_alarm()
        self.getFaceDatabaseNames()
        #self.trainClassifier()  # add faces to DB and train classifier

        #processing frame threads- for detecting motion and face detection

        for i, cam in enumerate(self.cameras):
            self.proccesing_lock = threading.Lock()
            thread = threading.Thread(name='frame_process_thread_' + str(i),
                                      target=self.process_frame,
                                      args=(cam, ))
            thread.daemon = False
            self.camera_threads.append(thread)
            thread.start()

        #Threads for alignment and recognition

        # for i, cam in enumerate(self.cameras):
        #   #self.proccesing_lock = threading.Lock()
        #   thread = threading.Thread(name='face_process_thread_' + str(i),target=self.people_processing,args=(cam,))
        #   thread.daemon = False
        #   self.people_processing_threads.append(thread)
        #   thread.start()

        #Thread for alert processing
        self.alerts_lock = threading.Lock()
        thread = threading.Thread(name='alerts_process_thread_',
                                  target=self.alert_engine,
                                  args=())
        thread.daemon = False
        thread.start()
Example #9
from tornado import gen
import sys
import os
import logging

OPENFACE_ROOT = os.path.abspath(os.path.join(__file__, '../../openface'))
sys.path.append(OPENFACE_ROOT)

import openface

FORMAT = '[%(levelname)1.1s %(asctime)s %(name)s:%(lineno)d] %(message)s'
logger = logging.getLogger("lib.openface")

IMAGE_SIZE = 96
ALIGN = openface.AlignDlib(
    OPENFACE_ROOT + "/models/dlib/shape_predictor_68_face_landmarks.dat")
NET = None


@gen.coroutine
def hash_face(image, bb=None, alignedFace=None):
    global NET
    if NET is None:
        logging.debug("Loading openface network")
        NET = openface.TorchNeuralNet(OPENFACE_ROOT +
                                      "/models/openface/nn4.small2.v1.t7",
                                      imgDim=IMAGE_SIZE,
                                      cuda=False)

    # this function can take a while to run, so we defer to the ioloop in case
    # there are other things that need to be taken care of.  Since this
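The comment above is cut off mid-sentence; a minimal sketch, assuming Tornado's gen.moment is the deferral mechanism meant here (an assumption, not the original code):

@gen.coroutine
def hash_faces(aligned_faces):
    reps = []
    for face in aligned_faces:
        reps.append(NET.forward(face))  # CPU-heavy, blocking call
        yield gen.moment                # hand control back to the IOLoop briefly
    raise gen.Return(reps)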
Example #10
def loadOpenfaceAlignerModel(models_dir,
                             model='shape_predictor_68_face_landmarks.dat',
                             debug=False):
    openface_aligner = openface.AlignDlib(
        os.path.join(models_dir, 'openface', model))
    return openface_aligner
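A hedged usage sketch; the models path and the file name are assumptions:

import cv2

aligner = loadOpenfaceAlignerModel('/root/openface/models')
bgr = cv2.imread('face.jpg')
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
bb = aligner.getLargestFaceBoundingBox(rgb)
if bb is not None:
    aligned = aligner.align(96, rgb, bb,
                            landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)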
Example #11
    def __init__(self):
        self.face_aligner = openface.AlignDlib(self.predictor_model)
Example #12
class Arg:
    def __init__(self,
                 dlibFacePredictor=os.path.join(
                     dlibModelDir, "shape_predictor_68_face_landmarks.dat"),
                 networkModel=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'),
                 imgDim=96, captureDevice=0, width=320, height=240,
                 threshold=0.5, classifierModel=SVC(kernel='rbf', C=5),
                 cuda=False):
        self.dlibFacePredictor = dlibFacePredictor
        self.networkModel = networkModel
        self.imgDim = imgDim
        self.captureDevice = captureDevice
        self.width = width
        self.height = height
        self.threshold = threshold
        self.classifierModel = classifierModel
        self.cuda = cuda

'''
    getRep returns the feature vector of each face
'''
def getRep(bgrImg, net):
    start = time.time()
    if bgrImg is None:
        raise Exception("Unable to load image/frame")

    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

    

    start = time.time()

    # Get the largest face bounding box
    # bb = align.getLargestFaceBoundingBox(rgbImg) #Bounding box

    # Get all bounding boxes
    bb = align.getAllFaceBoundingBoxes(rgbImg)

    if bb is None or len(bb) == 0:
        # raise Exception("Unable to find a face: {}".format(imgPath))
        return None
    start = time.time()

    alignedFaces = []
    for box in bb:
        alignedFaces.append(
            align.align(
                args.imgDim,
                rgbImg,
                box,
                landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE))
    if not alignedFaces:
        raise Exception("Unable to align the frame")
    start = time.time()

    reps = []
    for alignedFace in alignedFaces:
        reps.append(net.forward(alignedFace))
    # print reps
    return reps

'''
    infer() returns the people predicted in the frame, along with the
    corresponding information stored on the server (e.g. a summary of shared
    posts and the similarity between each attendee and you).
'''
def infer(img, args):
    #assume all attendee profile pictures have been downloaded into ./attendee/, with attendee id being file name
    image_list = []
    id_list = []
    for filename in glob.glob('attendee/*.png'):  # assuming png profile pictures
        im=Image.open(filename)
        id_list.append(filename)
        image_list.append(im)


    net = openface.TorchNeuralNet(args.networkModel, args.imgDim)
    reps = getRep(img, net) #return the detected and aligned faces in the video frame
    persons = []
    infos = []
    similarities = []
    for rep in reps:
        try:
            rep = rep.reshape(1, -1)
        except:
            print "No Face detected"
            return (None, None, None)
        start = time.time()

        distances = []  # fixed: distances was never initialized
        for attendee_img in image_list:
            d = rep - getRep(attendee_img, net)
            distances.append(np.dot(d, d))
        # print predictions
        minI = np.argmin(distances)  # index of the smallest distance
        attendee_id = id_list[minI]
        url = serverAttendeeRoot + "/" + attendee_id
        r = requests.get(url)
        person = r.json()['name']
        info = r.json()['info']
        similarity = r.json()['similarity']
        persons.append(person)
        infos.append(info)
        similarities.append(similarity)
        
    return (persons, infos,similarities)


if __name__ == '__main__':
    


    args = Arg()

    align = openface.AlignDlib(args.dlibFacePredictor)
    net = openface.TorchNeuralNet(
        args.networkModel,
        imgDim=args.imgDim, cuda = args.cuda)

    # Capture device. Usually 0 will be webcam and 1 will be usb cam.
    video_capture = cv2.VideoCapture(args.captureDevice)
    video_capture.set(3, args.width) 
    video_capture.set(4, args.height) 


    while True:
        ret, frame = video_capture.read()
        persons, infos, similarities = infer(frame, args)
        if persons is None: continue
        for i, value in enumerate(similarities):
            # if the similarity between you and this attendee is greater than 0.7, mark green
            if similarities[i] > 0.7:
                cv2.putText(frame, "Name: {} Info: {}".format(persons[i], infos[i]),
                            (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
            # otherwise, mark white
            else:
                cv2.putText(frame, "Name: {} Info: {}".format(persons[i], infos[i]),
                            (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            cv2.imshow('', frame)
            cv2.imshow('', frame)
        # quit the program on the press of key 'q'
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Example #13
def affine_transformation(human_files=None,
                          pred=None,
                          detec=None,
                          preview=False,
                          image_num=None,
                          blur=False,
                          frame=False,
                          image_clear=None,
                          rect=None):
    '''
        For each Image in the dataset
            + Extract the key points
            + Do affine transformation
            + Store it
    '''
    #Affine transformation
    falsy_dir = '../dataset/falsy/'
    face_aligner = openface.AlignDlib(auxilary.path_to_shape_predictor)
    affine_dir = '../dataset/lfw_affine/'

    if frame:
        alignedFace = face_aligner.align(96, image_clear, rect, \
                landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        return alignedFace
    if preview:
        # for i ,human_file in enumerate(human_files):
        #     folders =  (get_folder_from_path(human_file))
        #     folders = folders.split('\\')
        #     if folders[0] == 'LeBron_James':
        #         print (i, human_file)
        # input("e")
        image_count = len(human_files) - 1
        rand_int = np.random.random_integers(0, image_count)
        print(f'picked random image number: {rand_int}')
        # rand_int = image_num
        human_file = human_files[rand_int]
        while not os.path.exists(human_file):
            rand_int = np.random.random_integers(0, image_count)
            human_file = human_files[rand_int]
        state, shape, rect, image = facial_landmarks.get_shape(
            human_file, pred, detec)
        while not state:
            state, shape, rect, image = facial_landmarks.get_shape(
                human_file, pred, detec)

        image = facial_landmarks.draw_landmarks(image, shape, rect, blur=blur)
        alignedFace = face_aligner.align(96, image, rect, \
                landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        cv2.imshow("Aligned Face", alignedFace)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        return alignedFace, get_name_from_path(human_file), human_file

    for i, human_file in enumerate(human_files):
        state, shape, rect, image = facial_landmarks.get_shape(
            human_file, pred, detec)
        if state:
            file_path = affine_dir + get_folder_from_path(human_file)
            create_folder(affine_dir + get_name_from_path(human_file))
            alignedFace = face_aligner.align(96, image, rect, \
                    landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
            cv2.imwrite(file_path, alignedFace)
            # plt.imshow(face_aligned)
        else:
            print('Sorry, that image does not have faces!')
            file_name = human_file.split('/')
            file_name = file_name[len(file_name) - 1]
            file_name = file_name.split('\\')
            falsy_img_pth = falsy_dir + file_name[len(file_name) - 1]
            os.replace(human_file, falsy_img_pth)
        if i % 100 == 0:
            print(f'Working: .... iteration {i}')
Example #14

fileDir = os.path.dirname(os.path.realpath(__file__))

SERVER_IMG_DIM = 80
SERVER_CUDA = False

SERVER_MODE = "Stateful"
SERVER_DLIB_FACEPREDICTOR = os.path.join(
    fileDir, "FacePredictor",
    "shape_predictor_68_face_landmarks.dat")  # Path to dlib's face predictor
SERVER_OPENFACE_MODEL = os.path.join(
    fileDir, "Openface", "nn4.small2.v1.t7")  # OpenFace Torch net model

SERVER_PRETRAINED = os.path.join(fileDir, "Pretrained", "classifier.pkl")
SERVER_MULT_FACE_INFER = True

align = openface.AlignDlib(SERVER_DLIB_FACEPREDICTOR)

# Output folder for performance measure
SERVER_PROFILE_ENABLE = True
SERVER_PROFILE_DIR = os.path.join(fileDir, 'Profile')

# Parallel computing optimization
SERVER_FACE_SEARCH_OPTIMIZE = True
SERVER_FACE_SEARCH_PADDING = 0.5

SERVER_USE_PYTORCH = False
if SERVER_USE_PYTORCH:
    from OpenFacePytorch.loadOpenFace import prepareOpenFace
    net = prepareOpenFace(useCuda=False).eval()
else:
    net = openface.TorchNeuralNet(SERVER_OPENFACE_MODEL,
Example #15
import argparse
import os
from scipy import misc
import numpy as np
import time
import pickle
import openface
from scipy.misc import imsave

# path managing
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')  # path to the model directory
dlibModelDir = os.path.join(modelDir, 'dlib')  # dlib face detector model
openfaceModelDir = os.path.join(modelDir, 'openface')

dlib_aligner = openface.AlignDlib(dlibModelDir +
                                  "/shape_predictor_68_face_landmarks.dat")


def align_face(image, output_size=96, skip_multi=False):
    outRgb = dlib_aligner.align(
        output_size,
        image,
        landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE,
        skipMulti=skip_multi)
    # outRgb may be None if no face was found
    return outRgb
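A hedged usage sketch for align_face; the file name is an assumption:

img = misc.imread('face.jpg')  # RGB image via scipy.misc, imported above
aligned = align_face(img)
if aligned is not None:
    imsave('face_aligned.png', aligned)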


def align_images_in_folder(in_folder,
                           cleanup=False,
                           save=False,
Example #16

def face_augmentation_server():
    print 'starting up!'

    align = openface.AlignDlib(SHAPE_PREDICTOR_PATH)
    net = openface.TorchNeuralNet(FACE_NN_PATH)

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((HOST, PORT))
    sock.listen(1)
    print "server started"

    # signal to the lambda that the server is ready
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    count = 0
    not_connected = True
    while (not_connected):
        try:
            s.connect(('localhost', 10001))
            not_connected = False
        except:
            print 'waiting for host to come up', count
            if (count >= 30):
                print 'waited too long'
                sys.exit(-1)

            time.sleep(1)
            count += 1

    s.close()

    end = False
    while (not end):
        conn, addr = sock.accept()
        print 'connection made'

        print 'getting data'
        img_base64 = ''
        while True:
            data = conn.recv(MAX_BUFFER_SIZE)
            if (data[-1] == ':'):
                img_base64 += data[:-1]
                break

            img_base64 += data

        print 'checking end condition'
        poison_symbol = img_base64[0]
        img_base64 = img_base64[1:]
        if (poison_symbol == 'S'):
            end = True  # fixed: was a no-op comparison (==)
            conn.close()
            sock.close()
            break

        # process image
        print 'decode image'
        bio = io.BytesIO(base64.b64decode(img_base64))
        compressed_img = np.fromstring(bio.read(), dtype=np.uint8)
        bgrImg = cv2.imdecode(compressed_img, cv2.IMREAD_COLOR)
        rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

        print 'augmenting image'
        augmented_images = augment_image(rgbImg)

        print 'getting face vectors'
        face_feature_vectors = []
        for rgbImg in augmented_images:
            try:
                print 'adding vector'
                face_feature_vector = get_face_vector(rgbImg, align, net)
                face_feature_vectors.append(face_feature_vector)
            except Exception as e:
                sys.stderr.write(str(e) + '\n')

        # send back resulting face feature vectors
        print 'sending back face vectors'
        output_csv = ''
        for vector in face_feature_vectors:
            output_csv += ','.join(map(str, vector)) + '\n'

        conn.sendall(output_csv + ':')

        # close connection
        print 'closing connection'
        conn.close()

    print 'done'
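A hedged Python 2 client sketch matching the framing above (one leading byte where 'S' means shutdown, base64 image data, ':' terminator); everything except HOST, PORT and MAX_BUFFER_SIZE is an assumption:

def send_frame(jpeg_bytes):
    payload = 'C' + base64.b64encode(jpeg_bytes) + ':'  # any first byte but 'S'
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((HOST, PORT))
    client.sendall(payload)
    csv = ''
    while not csv.endswith(':'):
        csv += client.recv(MAX_BUFFER_SIZE)
    client.close()
    return csv[:-1]  # rows of comma-separated face feature vectors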
Example #17

from utilityScripts import get_frame_from_video
import faceSpoofValidation

mlbp = features.MultiScaleLocalBinaryPatterns((8, 1), (8, 2), (16, 2))

faceSpoofValidator = faceSpoofValidation.FaceSpoofValidator(
    mlbp, "classifiers/casia.pkllinear")

faceFrames = get_frame_from_video.get_frames(
    "/home/doru/Desktop/Licenta/Implementation/databases/"
    "cbsr_antispoofing/test_release/1/3.avi",
    1000,
)

align = openface.AlignDlib(
    "/home/doru/Desktop/Licenta/Implementation/models/dlib/shape_predictor_68_face_landmarks"
    ".dat")

for frame in faceFrames:
    start = time.time()
    # Get all bounding boxes
    bbs = align.getAllFaceBoundingBoxes(frame)

    if bbs is None or len(bbs) == 0:
        raise Exception("No faces detected")

    alignedFaces = []

    for box in bbs:
        alignedFaces.append(
            align.align(
Example #18
import cv2
import openface

bgrImg = cv2.imread('test.jpg')
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

align = openface.AlignDlib('shape_predictor_68_face_landmarks.dat')
net = openface.TorchNeuralNet()

bb = align.getLargestFaceBoundingBox(rgbImg)
alignedFace = align.align(
    96, rgbImg, bb, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
rep = net.forward(alignedFace)

print rep
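As a hedged aside (not part of the original example): two embeddings from net.forward are conventionally compared with a squared Euclidean distance in the OpenFace demos; the 0.99 threshold suggested for nn4.small2.v1 is an assumption here:

import numpy as np

d = rep1 - rep2                  # two 128-D embeddings from net.forward
squared_l2 = np.dot(d, d)        # smaller means more similar faces
same_person = squared_l2 < 0.99  # assumed threshold from the OpenFace demos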
Example #19

    total_dist = 0
    for pt, pred in zip(pt_labels, landmarks):
        dist = np.linalg.norm(pt - pred)
        total_dist = total_dist + dist
    avg_dist = np.divide(total_dist, 68)
    return np.divide(avg_dist, norm)


if __name__ == "__main__":
    i_images = indoorDF['imgPath']
    i_points = indoorDF['points']
    o_images = outdoorDF['imgPath']
    o_points = outdoorDF['points']
    g_images = globalDF['imgPath']
    g_points = globalDF['points']
    face_model = openface.AlignDlib(dlib_model)
    total_indoor = 0
    total_outdoor = 0
    tot_indoor_det = 0
    tot_outdoor_det = 0
    resultsDF = pd.DataFrame(columns=('experiment','type','loss',\
            'total_detected','avg_loss'))
    iter_count = 0
    for img, pts in zip(i_images, i_points):
        rgbImg = cv2.imread(img)
        landmarks = get_landmarks(face_model, rgbImg)
        if landmarks is not None:
            cleaned = clean_points(pts)
            tot_indoor_det = tot_indoor_det + 1
            for (x, y) in landmarks:
                cv2.circle(rgbImg, (x, y), 1, (0, 0, 255), -1)
Example #20
buff = 1024

import keras
keras.backend.set_learning_phase(0)  #set test phase
model = keras.models.load_model('gaze_1017_middle50.h5')

import tensorflow as tf
graph = tf.get_default_graph()

import openface
modelDir = os.path.join('/Users/dialog/openface/models')
dlibModelDir = os.path.join(modelDir, 'dlib')
#openfaceModelDir = os.path.join(modelDir, 'openface')
align_path = os.path.join(dlibModelDir,
                          "shape_predictor_68_face_landmarks.dat")
align = openface.AlignDlib(align_path)


def predict(model, x_data):
    """
    視線の推定を行う関数
    """
    y = model.predict(x_data)[0]
    #print(y)
    return y


def getRep(bgrImg):
    """
    読み込んだ画像から目の部分を抽出する関数
    """
Example #21
    def forward(self, aligned):
        return np.array(self.net.compute_face_descriptor(self.lastimg, aligned))

    def align(self, a, img, box, landmarkIndices):
        self.lastimg = img
        return self.sp(img, box)

DLIB_NN = DlibNetAdapter()

fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')

DEF_ALIGN = openface.AlignDlib(DLIB_MODEL) 
DEF_DETECTOR = DEF_ALIGN
#DEF_DETECTOR = opencv_detector
DEF_NET = openface.TorchNeuralNet(NN_MODEL, imgDim=IMG_DIM, cuda=CUDA)
if TRACKING_ENABLED:
    TRACKER = tracking.Tracking(dlib.correlation_tracker)
else:
    TRACKER = None


# scale rectangle to factor
def scaleRect(rect, factor):
    r_l = int(rect.left()*factor)
    r_t = int(rect.top()*factor)
    r_r = int(rect.right()*factor)
    r_b = int(rect.bottom()*factor)
Example #22
# This code takes pictures from a folder, recognizes the faces in them and colours the boundingBoxes black.
# Made for testing.

import openface
import cv2
import os
import sys
from scipy import misc
from PIL import Image

args = sys.argv

#args[1]: The path to the file 'shape_predictor_68_face_landmarks.dat' found in the openface/models/dlib folder.
align = openface.AlignDlib(args[1])

#args[2]: The path to a folder with pictures (of people)
path = args[2]

for file in os.listdir("output"):
    os.remove("output/" + file)

for file in os.listdir(path):
    a = raw_input("Press enter to continue, 0 and enter to quit: ")
    if a == "0":
        break
    image = misc.imread(path + "/" + file)
    print("File: " + file)
    boundingBoxes = align.getAllFaceBoundingBoxes(image)
    amount = len(boundingBoxes)
    if amount == 0:
        print("Didn't find any faces.")
Пример #23
0

app = Flask(__name__, static_url_path = "")

# global variables
brokerURL = ''
outputs = []
timer = None
lock = threading.Lock()
camera = None
cameraURL = ''
total_size = 0 

threshold = 1.0
imageDimension = 96  # fixed spelling
align = openface.AlignDlib('/root/openface/models/dlib/shape_predictor_68_face_landmarks.dat')
net = openface.TorchNeuralNet('/root/openface/models/openface/nn4.small2.v1.t7', imageDimension)

saveLocation = ''
featuresOfTarget = None
targetedFeaturesIsSet = False

@app.errorhandler(400)
def bad_request(error):  # renamed: both handlers were named not_found
    return make_response(jsonify({'error': 'Bad request'}), 400)

@app.errorhandler(404)
def not_found(error):
    return make_response(jsonify({'error': 'Not found'}), 404)

@app.route('/admin', methods=['POST'])
Example #24
# coding:utf-8
import os
import openface
import cv2
import pickle
import numpy as np

fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
classifierDir = os.path.join(fileDir, '..', 'generated-embeddings')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
predict = os.path.join(dlibModelDir, 'shape_predictor_68_face_landmarks.dat')
torchmodel = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')
align = openface.AlignDlib(predict)
net = openface.TorchNeuralNet(torchmodel)
landmarkIndices = openface.AlignDlib.OUTER_EYES_AND_NOSE
predictdatabase = os.path.join(classifierDir, 'classifier.pkl')  # face database


# compute a representation for each face in the image
def getRep(img):
    bgrImg = cv2.imread(img)
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
    bbs = align.getAllFaceBoundingBoxes(rgbImg)
    reps = []
    for bb in bbs:
        facelandmarks = align.findLandmarks(rgbImg, bb)
        alignedFace = align.align(96,
                                  rgbImg,
                                  bb,
Example #25
from pathlib import Path
from sklearn.neighbors import KNeighborsClassifier
from sklearn import linear_model

dir_path = os.path.dirname(os.path.realpath(__file__))

model_dir = os.path.join(dir_path, 'models')
faces_dir = os.path.join(dir_path, 'faces')

dlib_face_predictor = os.path.join(model_dir,
                                   'shape_predictor_68_face_landmarks.dat')
network_model = os.path.join(model_dir, 'nn4.small2.v1.t7')

img_dim = 96

align = openface.AlignDlib(dlib_face_predictor)
net = openface.TorchNeuralNet(network_model, img_dim)


def face_rep(rgb_img):
    bb = align.getLargestFaceBoundingBox(rgb_img)
    alignedFace = align.align(
        img_dim,
        rgb_img,
        bb,
        landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)

    return net.forward(alignedFace)


def read_path(img_path):
Example #26

import openface
import cv2
import dlib
from glob import glob
from utils.imutils import *
from scipy.spatial.distance import euclidean
import pandas as pd

PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
INNER_EYES_MOUTH_AND_NOSE = openface.AlignDlib.INNER_EYES_AND_BOTTOM_LIP
FACE_SIZE = 128
IN_PATH = "./MultiEmoVA-images/images/all/*.jpg"
FEATURES_OUT_PATH = "./features/"

# Create facial landmark detector using dlib model
align = openface.AlignDlib(PREDICTOR_PATH)

num_faces_in_image = {}
context2 = []
context3 = []
context4 = []

# Loop over images in the dataset to extract features
for image_path in glob(IN_PATH):
    # Extract image name
    image_id = image_path.split("/")[-1][:-4]

    # Read in image and detect faces
    image = cv2.imread(image_path)
    faces = align.getAllFaceBoundingBoxes(image)
Example #27
# This Script gets Executed inside the 'bamos/openface' Docker Container #

import openface  # The Openface package is imported inside the Docker Container
import numpy as np

import os
import socket

MODEL_DIR = os.path.join(os.path.dirname(__file__), 'models')
DLIB_DIR = os.path.join(MODEL_DIR, 'dlib')
OPENFACE_DIR = os.path.join(MODEL_DIR, 'openface')
DIM = 96

ADDRESS = ('', 8989)

align = openface.AlignDlib(
    os.path.join(DLIB_DIR, "shape_predictor_68_face_landmarks.dat"))
net = openface.TorchNeuralNet(os.path.join(OPENFACE_DIR, "nn4.small2.v1.t7"),
                              DIM)

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDRESS)

try:
    while True:
        server.listen(5)
        connection, address = server.accept()

        shape = np.frombuffer(connection.recv(3 * 4), np.int32)
        length = np.prod(shape)

        image = np.empty(shape, np.uint8)
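The listing is cut off while the server receives the image body; a hedged client-side sketch of the implied wire protocol (three int32 values for the shape, then raw uint8 pixels):

def send_image(image, host='localhost', port=8989):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    shape = np.asarray(image.shape, dtype=np.int32)  # (H, W, C)
    client.sendall(shape.tobytes())                  # 3 * 4 bytes, matches recv(3 * 4)
    client.sendall(np.ascontiguousarray(image, dtype=np.uint8).tobytes())
    client.close()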
Example #28
sys.path.append(os.path.abspath(os.path.join(cur_path, '../util/')))
from img_hash import EXTS, phash, otsu_hash, otsu_hash2, hamming
from img_histo import gray_histo, rgb_histo, yuv_histo, hsv_histo, abs_dist
from img_gist import gist
from kmeans import eculidean_dist, norm0_dist
from img_hog import hog2, hog3, hog_lsh_list, hog_histo
from img_sift import sift2, sift_histo
from lsh import LSH_hog, LSH_sift
from rerank import blending, ensembling
import cPickle
import openface

openfacedir = '/home/ubuntu/Documents/openface'
modelDir = osp.join(openfacedir, 'models/dlib',
                    "shape_predictor_68_face_landmarks.dat")
align = openface.AlignDlib(modelDir)
netDir = osp.join(openfacedir, 'models/openface', 'nn4.small2.v1.t7')
net = openface.TorchNeuralNet(netDir, imgDim=96, cuda=False)

upload_prefix = './static/upload/'
SETNAME = 'lfw_raw'

with open('../static/url.pkl', 'rb') as fh:  # binary mode for pickled data
    urldat = cPickle.load(fh)

app = Flask(__name__)
# db = SQLAlchemy(app)
UPLOAD_FOLDER = upload_prefix
# ALLOWED_EXTENSIONS = set(['pdf', 'png', 'jpg', 'jpeg', 'gif'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
api = Api(app)
Example #29

from Testing_Face_detector import Image_Sliding
import os
import openface
import pandas as pd
from sklearn.svm import SVC
import face_recognition
import cv2
import pickle

#
path = "../Images"
face_encodings = []
Names = []
face_aligner = openface.AlignDlib("shape_predictor_68_face_landmarks.dat")


def encoding_images():
    for root, directory, filenames in os.walk(path):
        for file_ in filenames:
            path_image = os.path.join(".", root, file_)
            # print(path_image)
            image = cv2.imread(path_image)
            image_new = cv2.resize(image, (300, 300))
            image_ = cv2.resize(image, (300, 300))
            # cv2.imshow("", image)
            # cv2.waitKey()
            name = file_.split('.')
            name__ = ""
            for n in name:
                if n == '.' or n == 'jpeg' or n == 'jpg' or n == 'png' or n == 'PNG':
                    pass
Example #30
TFILE = "traindata.csv"
MPATH = "Mugshot/"
PPATH = "Test/"
L = []
Y = []
input_image_size = 160
persons = 0
predictor_path = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)

facenet.load_model("20170512-110547/20170512-110547.pb")
# Create the shape predictor and aligner from the same landmarks model
face_pose_predictor = dlib.shape_predictor(predictor_path)
face_aligner = openface.AlignDlib(predictor_path)


def prewhiten(x):
    mean = np.mean(x)
    std = np.std(x)
    std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))
    y = np.multiply(np.subtract(x, mean), 1 / std_adj)
    return y


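A hedged usage note (not in the original): prewhiten standardizes pixel values to zero mean and roughly unit variance, the normalization FaceNet-style models expect; the file name and the cv2/np imports are assumed from the truncated top of the example:

img = cv2.imread('mugshot.jpg')
img = cv2.resize(img, (input_image_size, input_image_size))
whitened = prewhiten(img.astype(np.float32))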
def facialPoints(img):
    global detector
    global predictor
    global face_pose_predictor  # fixed typo in the global name
    global face_aligner