Example #1
 def _load_predictor(self, predictor):
     predictor = utils.load_to_file(predictor)
     if predictor.endswith('.bz2'):
         decompressor = bz2.BZ2Decompressor()
         with open(predictor, 'rb') as f, tempfile.NamedTemporaryFile('wb') as g:
             g.write(decompressor.decompress(f.read()))
             g.flush()  # flush so the file is complete before dlib reads g.name
             return dlib.shape_predictor(g.name)
     else:
         return dlib.shape_predictor(predictor)
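A shorter equivalent (a sketch, assuming the same imports and `utils` helper as above) lets bz2.open do the decompression; the flush matters because dlib opens g.name as a separate file handle:

def _load_predictor(self, predictor):
    predictor = utils.load_to_file(predictor)
    if predictor.endswith('.bz2'):
        with bz2.open(predictor, 'rb') as f, \
                tempfile.NamedTemporaryFile('wb') as g:
            g.write(f.read())
            g.flush()  # ensure the bytes are on disk before dlib reads g.name
            return dlib.shape_predictor(g.name)
    return dlib.shape_predictor(predictor)

(On Windows, NamedTemporaryFile would additionally need delete=False before the file can be reopened.)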
Example #2
def gen_train(path,label):
    res = ""
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(sys.argv[2])
    for f in glob.glob(os.path.join(path, "*.jpg")):
        print("Processing file: {}".format(f))
        img = io.imread(f)

        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(img, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            # print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            #     k, d.left(), d.top(), d.right(), d.bottom()))
            # Get the landmarks/parts for the face in box d.
            shape = predictor(img, d)
            # print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
            #                                           shape.part(1)))
            val = feature_calc(shape)
            res += str(label) + ' '
            # libsvm-style "index:value" pairs, despite the .csv extension
            for x, v in enumerate(val, start=1):
                res += '{}:{}'.format(x, v)
                res += ' ' if x != len(val) else '\n'
    with open(str(label) + '.csv', 'w') as fout:
        fout.write(res)
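A minimal invocation sketch, assuming the script is run with the image directory and predictor path as arguments (matching the sys.argv[2] use above):

if __name__ == '__main__':
    # writes libsvm-style lines for label 1 into 1.csv
    gen_train(sys.argv[1], 1)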
Example #3
    def __init__(self, facePredictor, padding=0.2):
        """Initialize with the path to dlib's face predictor and a padding ratio."""
        assert facePredictor is not None

        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(facePredictor)
        self.padding = padding
        # add padding to the template and renormalize to [0, 1]
        new_template = []
        for item in TEMPLATE:
            new_item = ((item[0] + self.padding) / (2 * self.padding + 1),
                        (item[1] + self.padding) / (2 * self.padding + 1))
            new_template.append(new_item)
        self.new_template = np.float32(new_template)
        
        # This reference shape is taken from the dlib implementation (points 17-67).
        self.mean_shape_x = [0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483, 0.799124,
            0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127, 0.36688, 0.426036,
            0.490127, 0.554217, 0.613373, 0.121737, 0.187122, 0.265825, 0.334606, 0.260918,
            0.182743, 0.645647, 0.714428, 0.793132, 0.858516, 0.79751, 0.719335, 0.254149,
            0.340985, 0.428858, 0.490127, 0.551395, 0.639268, 0.726104, 0.642159, 0.556721,
            0.490127, 0.423532, 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874,
            0.553364, 0.490127, 0.42689]#17-67
        self.mean_shape_y = [0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891,
            0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625, 0.587326,
            0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758, 0.179852, 0.231733,
            0.245099, 0.244077, 0.231733, 0.179852, 0.178758, 0.216423, 0.244077, 0.245099,
            0.780233, 0.745405, 0.727388, 0.742578, 0.727388, 0.745405, 0.780233, 0.864805,
            0.902192, 0.909281, 0.902192, 0.864805, 0.784792, 0.778746, 0.785343, 0.778746,
            0.784792, 0.824182, 0.831803, 0.824182] #17-67
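The two lists pair up index-for-index (landmarks 17-67); a small sketch, continuing __init__, that stacks them into a single array:

        # hypothetical convenience: one (51, 2) array of normalized (x, y) pairs
        self.mean_shape = np.stack([self.mean_shape_x, self.mean_shape_y], axis=1)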
Example #4
def calcPoints(path):
    predictor_path = '/Users/vovanmozg/Downloads/bigdata/shape_predictor_68_face_landmarks.dat'
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    print("Processing file: {}".format(path))
    img = io.imread(path)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)
    if len(dets) != 1:
        return False

    print("Number of faces detected: {}".format(len(dets)))
    points = []
    for k, d in enumerate(dets):
        #print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        #    k, d.left(), d.top(), d.right(), d.bottom()))
        # Get the landmarks/parts for the face in box d.
        shape = predictor(img, d)
        #print(numpy.matrix([[p.x, p.y] for p in shape.parts()]))
        for p in shape.parts():
            points.append((p.x, p.y))

    return points
Example #5
 def load_model(self):
     self.predictor = dlib.shape_predictor(self.predictor_path)
     self.detector = dlib.get_frontal_face_detector()
     self.mean3DShape, self.blendshapes, self.mesh, self.idxs3D, self.idxs2D = utils.load3DFaceModel(self.FaceModel_file)
     self.projectionModel = models.OrthographicProjectionBlendshapes(self.blendshapes.shape[0])
     self.renderer = FaceRendering.FaceRenderer(width=self.width, height=self.height)
Example #6
def get_landmarks(argv):
    line = argv[1]
    left = int(argv[2])
    top = int(argv[3])
    right = int(argv[2]) + int(argv[4])   # left + width (argv values are strings)
    bottom = int(argv[3]) + int(argv[5])  # top + height
    rect = dlib.rectangle(left, top, right, bottom)
    predictor_path = 'shape_predictor_68_face_landmarks.dat' # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    lmarks = []
    bboxes = []
    #for i,line in enumerate(line_arr):
    #    print('%d/%d'%(i,len(line_arr)))
    img = io.imread(line)
    dets = detector(img, 0)
    if len(dets) == 0:
        rect = dlib.rectangle(0, 0, img.shape[1], img.shape[0])  # (width, height) order
    else:
        rect = dets[0]

    shape = predictor(img, rect)
    xy = _shape_to_np(shape)
    np.savetxt('landmarks.txt', xy)
    
    lmarks.append(xy)
    bboxes.append(rect)

    lmarks = np.vstack(lmarks)
    bboxes = np.asarray(bboxes)
    print(lmarks)
    return lmarks
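_shape_to_np is used here (and in Examples #7 and #11) but never shown; a minimal sketch of what it presumably does, converting dlib's 68 landmark parts into a (68, 2) NumPy array:

def _shape_to_np(shape):
    # one (x, y) row per landmark point
    xy = np.zeros((68, 2), dtype=int)
    for i in range(68):
        xy[i] = (shape.part(i).x, shape.part(i).y)
    return xy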
Example #7
def feature_extraction(img):
    dist = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    dist = np.asarray(dist)

    # Path to the facial landmark predictor
    predictor_path = '/home/qburst/Desktop/Emotion Detection Final/ED_dist/Necessaries/shape_predictor_68_face_landmarks.dat'

    # Create the frontal face detector and the landmark predictor
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)


    # 'dets' holds every detected face as a rectangle; coordinates are
    # available via left(), top(), right(), bottom(). 'dets' is iterable.
    
    dets = detector(img, 1)
    
    for rect in dets:
    
        # 'shape' holds the 68 facial landmark points, accessed via shape.part():
        # e.g. shape.part(1).x, shape.part(1).y for point 1. 'shape' is not iterable.
        shape = predictor(img, rect)
        points = _shape_to_np(shape)
        dist_as_np = np.asarray(distance(points))
        dist = np.vstack((dist, dist_as_np))

    dist = np.delete(dist, 0, 0)
    return len(dets), dist, dets        
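distance is not shown either; since dist is seeded with a 1x14 row, it evidently returns 14 values per face. A hypothetical stand-in built from Euclidean distances between landmark pairs (the pairs are illustrative, not the originals):

def distance(points):
    # hypothetical landmark pairs; the original feature set is not shown
    pairs = [(37, 41), (38, 40), (43, 47), (44, 46), (21, 22), (19, 37),
             (24, 44), (48, 54), (51, 57), (62, 66), (31, 35), (27, 30),
             (17, 36), (26, 45)]
    return [np.linalg.norm(points[a] - points[b]) for a, b in pairs]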
Example #8
 def process_frames_face(self, frames):
     detector = dlib.get_frontal_face_detector()
     predictor = dlib.shape_predictor(self.face_predictor_path)
     mouth_frames = self.get_frames_mouth(detector, predictor, frames)
     self.face = np.array(frames)
     self.mouth = np.array(mouth_frames)
     self.set_data(mouth_frames)
Example #9
    def __init__(self):
        self.cap = cv2.VideoCapture(0)
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')  # cv2.cv.CV_FOURCC in OpenCV 2
        self.out = cv2.VideoWriter('data/' + subject + '/' + video +'/output.avi', -1, 9, (width, height))
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
        # Imaging device - must be a connected camera (not an ip camera or mjpeg
        # stream)
        self.camera = Camera(camera=0)  # first camera by default

        self.w, self.h = 0, 0
        self.pressed = 0
        # Containerized analysis of received image frames (an openMDAO assembly)
        # is defined next.

        # This assembly is designed to handle all image & signal analysis,
        # such as face detection, forehead isolation, time series collection,
        # heart-beat detection, etc.

        # Basically, everything that isn't communication
        # to the camera device or part of the GUI
        self.processor = findFaceGetPulse(bpm_limits=[50, 160],
                                          data_spike_limit=2500.,
                                          face_detector_smoothness=10.)

        # Init parameters for the cardiac data plot
        self.bpm_plot = False
        self.plot_title = "Cardiac info - raw signal, filtered signal, and PSD"

        # Maps keystrokes to specified methods
        # (A GUI window must have focus for these to work)
        self.key_controls = {"s": self.toggle_search,
                             "d": self.toggle_display_plot,
                             "f": self.write_csv}
Example #10
File: core.py Project: HarryZhu/elk
 def _set_landmark_predictor(self):
     lp = self.getConfig('dataset','dataset_shape_predictor')
     if os.path.isfile(lp):
         self.landmark_predictor = dlib.shape_predictor(lp)
     else:
         print(lp + " is invalid, pls check your config file.")
         logging.error(lp + " is invalid, pls check your config file.")
Example #11
def get_landmarks(line_arr):
    predictor_path = 'shape_predictor_68_face_landmarks.dat' # http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    lmarks = []
    bboxes = []
    for i,line in enumerate(line_arr):
        print('%d/%d'%(i,len(line_arr)))
        img = io.imread(line)
        dets = detector(img, 0)
        if len(dets) == 0:
            rect = dlib.rectangle(0, 0, img.shape[1], img.shape[0])  # (width, height) order
        else:
            rect = dets[0]

        shape = predictor(img, rect)
        xy = _shape_to_np(shape)
        lmarks.append(xy)
        bboxes.append(rect)

    lmarks = np.vstack(lmarks)
    bboxes = np.asarray(bboxes)

    return lmarks,bboxes
Example #12
 def __init__(self):
     self.bridge = CvBridge()
     self.imgDim = 96
     self.align = openface.AlignDlib(DLIB_FACEPREDICTOR)
     self.face_pose_predictor = dlib.shape_predictor(DLIB_FACEPREDICTOR)
     self.net = openface.TorchNeuralNet(NETWORK_MODEL, self.imgDim)
     self.landmarkIndices = openface.AlignDlib.OUTER_EYES_AND_NOSE
     self.face_detector = dlib.get_frontal_face_detector()
     self.count = 0
     self.face_count = 0 # Cumulative total faces in training.
     self.max_face_count = 10
     self.train = False
     self.enable = True
     self.data_root = os.path.join(CWD, 'faces')
     self.train_dir = os.path.join(self.data_root, 'training-images')
     self.aligned_dir = os.path.join(self.data_root, 'aligned-images')
     self.classifier_dir = CLASSIFIER_DIR
     self.clf, self.le = None, None
     self.load_classifier(os.path.join(self.classifier_dir, 'classifier.pkl'))
     self.node_name = rospy.get_name()
     self.multi_faces = False
     self.threshold = 0.5
     self.detected_faces = deque(maxlen=10)
     self.training_job = None
     self.stop_training = threading.Event()
     self.faces = []
     self.event_pub = rospy.Publisher(
         'face_training_event', String, latch=True, queue_size=1)
     self.faces_pub = rospy.Publisher(
         '~faces', Faces, latch=True, queue_size=1)
     self.imgpub = rospy.Publisher(
         '~image', Image, latch=True, queue_size=1)
     self._lock = threading.RLock()
     self.colors = [ (255, 0, 0), (0, 255, 0), (0, 0, 255),
         (255, 255, 0), (255, 0, 255), (0, 255, 255) ]
Example #13
    def __init__(self, landmarks=None, openface=None, size=96, torch="th"):
        """Face detection

        Parameters
        ----------
        landmarks : str
            Path to dlib's 68 facial landmarks predictor model.
        openface : str
            Path to openface FaceNet model.
        size : int
            Size of the normalized face thumbnail.
        """
        super(Face, self).__init__()

        # face detection
        self._face_detector = dlib.get_frontal_face_detector()

        # landmark detection
        if landmarks is not None:
            self._landmarks_detector = dlib.shape_predictor(landmarks)

        # normalization
        self.size = size
        self._landmarks = MINMAX_TEMPLATE * self.size

        if openface is not None:
            self._net = TorchWrap(torch=torch, model=openface, size=self.size, cuda=False)
Example #14
	def __init__(self):
		self.PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
		MOUTH_POINTS = list(range(48, 61))
		self.OVERLAY_POINTS = [MOUTH_POINTS]

		self.detector = dlib.get_frontal_face_detector()
		self.predictor = dlib.shape_predictor(self.PREDICTOR_PATH)
Example #15
 def __init__(self):
     self._detector = dlib.get_frontal_face_detector()
     pose_predictor_path = '../models/dlib/shape_predictor_68_face_landmarks.dat'
     self._predictor = dlib.shape_predictor(pose_predictor_path)
     if not self.SERVER_NO_GUI_MODE:
         self._win = dlib.image_window()
     self._lock = RLock()
Example #16
def getVisualFetB():
	'''
	Second attempt at extracting features:
	extract the face from each frame, then extract details of the landmarks.
	The generated file for each video contains a numpy array of the facial-landmark vectors.
	'''

	# fileName = '../training/training_gt.csv'
	# trueMap = getTruthVal(fileName)

	print('Started extracting features B')

	videoPath = '../training/download_train-val/trainFiles/'
	vidNames = os.listdir(videoPath)
	vidNames = [x for x in vidNames if x.endswith(".mp4")]

	# videoPath = '../training/download_train-val/validationFiles/'
	# vidNames = os.listdir(videoPath)
	# vidNames = [x for x in vidNames if x.endswith(".mp4")]

	# vidNames.extend(vidNamesTrain)

	# Initialize detectors, load it for face detection
	predictorPath = 'coreData/shape_predictor_68_face_landmarks.dat'
	faceDetector = dlib.get_frontal_face_detector()
	shapePredictor = dlib.shape_predictor(predictorPath)

	saveFetPath = 'tmpData/visualFetB/'
	saveVidPath = 'tmpData/vidData/'

	if not os.path.exists(saveFetPath):
		os.makedirs(saveFetPath)

	if not os.path.exists(saveVidPath):
		os.makedirs(saveVidPath)


	for i in range(len(vidNames)):
		fileName = vidNames[i]
		# strip('.mp4') removes *characters* from the ends, not the suffix; use splitext
		baseName = os.path.splitext(fileName)[0]

		if os.path.isfile(saveFetPath + baseName + '.npy'):
			continue

		frameList = GetFrames(videoPath + fileName, redFact = 0.5, skipLength = 5)
		savePath = saveVidPath + baseName
		
		# np.save(savePath, frameList)
		# Do not save, too large!

		faceList = DetectFaceLandmarksInList(frameList, faceDetector, shapePredictor)
		savePath = saveFetPath + baseName
		np.save(savePath, faceList)

		print('\r{:.3f} part completed. Currently at file: {}'.format((i * 1.0) / len(vidNames), fileName), end='')
		sys.stdout.flush()

	print('\n')
Example #17
 def __init__(self, intrinsicMat=K, distortionCof=D, imgWidth=1920, imgHeight=1080):
     self.w = imgWidth
     self.h = imgHeight
     self.K = intrinsicMat
     self.D = distortionCof
     self.R_camInScreen = R_camInScreen
     self.t_camInScreen = t_camInScreen
     self.predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')
     self.detector = dlib.get_frontal_face_detector()
Example #18
 def __init__(self, visualize=True):
     self.detector = dlib.get_frontal_face_detector()
     self.predictor = dlib.shape_predictor(predictor_path)
     self.fronter = getDefaultFrontalizer()
     if visualize:
         self.win = dlib.image_window()
         self.win2 = dlib.image_window()
     else:
         self.win = self.win2 = None
Example #19
File: faces.py Project: FNRE/siamese_net
 def __init__(self):
     """Summary."""
     model_filename = 'shape_predictor_68_face_landmarks.dat'
     if not os.path.exists(model_filename):
         os.system(
             'wget http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2')
         os.system('bunzip2 shape_predictor_68_face_landmarks.dat.bz2')
     self.model = dlib.shape_predictor(model_filename)
     self.detector = dlib.get_frontal_face_detector()
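A standard-library alternative to shelling out to wget and bunzip2 (a sketch; the dlib.net mirror URL is an assumption):

import bz2
import os
import urllib.request

model_filename = 'shape_predictor_68_face_landmarks.dat'
if not os.path.exists(model_filename):
    # download the compressed model, then decompress it in-process
    tmp, _ = urllib.request.urlretrieve(
        'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2')
    with bz2.open(tmp, 'rb') as src, open(model_filename, 'wb') as dst:
        dst.write(src.read())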
Example #20
    def __init__(self):
        self.net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim,cuda=args.cuda)
        self.align = openface.AlignDlib(args.dlibFacePredictor)
        self.neuralNetLock = threading.Lock()
        self.predictor = dlib.shape_predictor(args.dlibFacePredictor)

        logger.info("Opening classifier.pkl to load existing known faces db")
        with open("generated-embeddings/classifier.pkl", 'r') as f: # le = labels, clf = classifier
            (self.le, self.clf) = pickle.load(f) # Loads labels and classifier SVM or GMM
Example #21
    def __init__(self, facePredictor):
        """
        Instantiate an 'AlignDlib' object.

        :param facePredictor: The path to dlib's face predictor.
        :type facePredictor: str
        """
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(facePredictor)
Example #22
    def __init__(self, predictor_path):
        """

        :rtype : LandmarkExtractor
        """
        self.predictor_path = predictor_path
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(self.predictor_path)
        self.featureMatrix = FeatureMatrix()
Example #23
 def __init__(self, Xtr=None):
     self.X = Xtr
     module_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
     predictor_path = os.path.join(module_path, 'webrtc-emotion-recognition/static/resources',
                                    'shape_predictor_68_face_landmarks.dat') 
     self.detector = dlib.get_frontal_face_detector()
     self.predictor = dlib.shape_predictor(predictor_path)
     if self.predictor is None:
         print(predictor_path)
         raise Exception('Predictor dat file was not found.')
Example #24
    def __init__(self):      # The first method called when the class is instantiated
        # Use dlib's frontal face detector
        self.detector = dlib.get_frontal_face_detector()
        # dlib's 68-point model: use the pre-trained shape predictor
        self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

        # Use the computer's built-in camera
        self.cap = cv2.VideoCapture(0)
        # Set video parameters: propId selects the parameter, value sets its value
        self.cap.set(3, 480)
Example #25
def predict_shape(fname):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(MODEL)

    img = io.imread(fname)
    dets = detector(img, 1)

    for k, d in enumerate(dets): # should only detect one face
        shape = predictor(img, d)
        return shape
Example #26
 def __init__(self, predictor_path, classifier_path, scaler_path, max_height=640):
     super(ClosedEyeDetector, self).__init__(max_height=max_height)
     self.detector = dlib.get_frontal_face_detector()
     self.predictor = dlib.shape_predictor(predictor_path)
     with open(classifier_path) as f:
         self.classifier = cPickle.load(f)
     with open(scaler_path) as f:
         self.scaler = cPickle.load(f)
     self.faceParse = FindAndParseFaces(self.predictor)
     self.scoreEyes = ScoreEyes(self.classifier, self.scaler)
     self.n_people_last = None
Example #27
    def __init__(self, predictor):
        """
        Initializes the face detector and landmark predictor.

        Args:
            predictor: Path to the landmark specification file.
        """
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = None
        if predictor is not None:
            self.predictor = dlib.shape_predictor(predictor)
Example #28
    def test_shape_predictor(self):
        predictor = dlib.shape_predictor(SHAPE_PREDICTOR_FNAME)
        image = _load_image_using_pillow(self.face_jpg_path)

        # This is the output of the detector, hardcoded
        detection = dlib.rectangle(left=125, top=56, right=434, bottom=365)
        shape = predictor(image, detection)
        self.assertEqual(len(shape.parts()), 68)
        for p in shape.parts():
            self.assertGreater(p.x, 0)
            self.assertGreater(p.y, 0)
Example #29
 def process_frames_face(self, frames):
     """
     Preprocess from frames using face detector
     """
     detector = dlib.get_frontal_face_detector()
     predictor = dlib.shape_predictor(self.face_predictor_path)
     mouth_frames = self.get_frames_mouth(detector, predictor, frames)
     self.face = np.array(frames)
     self.mouth = np.array(mouth_frames)
     if mouth_frames[0] is not None:
         self.set_data(mouth_frames)
Example #30
    def __init__(self):
        """
        Instantiate an 'AlignDlib' object.

        :param facePredictor: The path to dlib's
        :type facePredictor: str
        """
        facePredictor = '../models/facerecog/shape_predictor_68_face_landmarks.dat'
        # pylint: disable=no-member
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(facePredictor)
Example #31
# In[47]:

import dlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np

# In[48]:

# dlib handles the image side
# the detector finds faces in an image
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor('../models/shape_predictor_5_face_landmarks.dat')

# In[49]:

img = dlib.load_rgb_image('../imgs/12.jpg')
plt.figure(figsize=(16, 10))
plt.imshow(img)
plt.show()

# In[50]:

img_result = img.copy()
dets = detector(img, 1)
# give the detector an image and it finds the faces
if len(dets) == 0:  # no face in the photo
    print('cannot find faces!')
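The matplotlib.patches import above goes unused in the captured cells; a sketch of the likely next step, drawing each detection on img_result:

fig, ax = plt.subplots(figsize=(16, 10))
ax.imshow(img_result)
for det in dets:
    # dlib rectangles expose left()/top()/width()/height()
    ax.add_patch(patches.Rectangle((det.left(), det.top()),
                                   det.width(), det.height(),
                                   linewidth=2, edgecolor='r', facecolor='none'))
plt.show()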
Example #32
 def set_model(self, model_name):
     from dlib import shape_predictor
     self.model = shape_predictor(
         './models/facial_landmarks/dlib/shape_predictor_68_face_landmarks.dat'
     )
Example #33
 def __init__(self, resize_width=512):
     predictor_path = downloader.check_model()
     self.detector = dlib.get_frontal_face_detector()
     self.predictor = dlib.shape_predictor(predictor_path)
     self.width = int(resize_width)
Example #34
class Instance:
    _instances_count = 0
    face_detector = dlib.get_frontal_face_detector()
    sp = dlib.shape_predictor(predictor_path)
    facerec = dlib.face_recognition_model_v1(face_rec_model_path)
    faces = pickle.load(open(faces_path, "rb"))
    faces_dist_thresh = 0.55

    def __init__(self, event, frame=None):
        self._features = event._features
        self._events = [event]
        self._stability = 1
        self._bbox = event._bbox
        self._label = event._label
        self._identified = False
        self._score = event._score
        self._com = event._com

        self._estimate_object_distance()

        self._name = "%s_%d" % (self._label, Instance._instances_count)
        Instance._instances_count += 1

        if self._label == 'person' and self._identified is False:
            self._identify(frame)

    @property
    def stability(self):
        return self._stability

    @property
    def features(self):
        return self._features

    def _identify(self, frame):
        if TRY_IDENTIFY is False:
            return

        if self.stability % 10 > 0:
            return

        dets = self.face_detector(frame, 1)

        if len(dets) == 0:
            return

        d = dets[0] # assume only one face if any
        shape = self.sp(frame, d)

        face_descriptor = self.facerec.compute_face_descriptor(frame, shape)
        face_descriptor_str = str(face_descriptor).replace('\n', ',')
        face_descriptor_arr = face_descriptor_str.split(',')
        pred = self.faces.predict([face_descriptor_arr])
        dist, _ = self.faces.kneighbors([face_descriptor_arr])
        mean_dist = np.mean(dist)

        if mean_dist < self.faces_dist_thresh:
            rec = pred[0]

            self._identified = True
            self._name = labels_mapping[rec]
        else:
            print("detect face but unknown")

    def update(self, ev, partial=False, frame=None):
        self._events.append(ev)
        self._features = ev._features
        self._label = ev._label
        self._score = ev._score
        self._com = ev._com
        self._bbox = ev._bbox
        self._estimate_object_distance()
        self._stability = min(self._stability + 1, 10)

        if self._label == 'person' and self._identified is False:
            self._identify(frame)

    def _estimate_object_distance(self, far_threshold=3.0, near_threshold=1.5):

        _person_width_in_meters = 0.6
        _focal_length = 530.0

        width_in_pix = float(self._bbox[3]) - float(self._bbox[1])

        dist = (_person_width_in_meters * _focal_length) / width_in_pix

        self._distance = dist

        if dist < near_threshold:
            self._distance_level = 'Near'
        elif dist > far_threshold:
            self._distance_level = 'Far'
        else:
            self._distance_level = 'Mid'
Example #35
import dlib
import cv2

visual_image_suffix = "_ImgCANON2"
thermal_image_suffix = "_ImgFLIR2"
image_extension = ".PNG"
image_path = "images\\"

# note: despite the *_path names, these variables hold loaded objects, not paths
thermal_predictor_path = dlib.shape_predictor("predictors\\predictor4.dat")
thermal_cascade_path = cv2.CascadeClassifier(
    "cascades\\haarcascade_frontalface_default.xml")

visual_cascade_path = cv2.CascadeClassifier(
    "cascades\\haarcascade_frontalface_default.xml")
visual_predictor_path = dlib.shape_predictor(
    "predictors\\shape_predictor_68_face_landmarks.dat")

detection_error_text = "No face detection"
Example #36
#coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
import cv2
import dlib
import os
import types
# Crop the face out of a full image according to the face bounding box (bbox)
# and save it under the file name cropimgname.
# If no face is detected, return False; otherwise return True.
face_detector = dlib.get_frontal_face_detector()
landmark_predictor = dlib.shape_predictor(
    "shape_predictor_68_face_landmarks.dat")


def getface_contour(imgpath):
    bgrImg = cv2.imread(imgpath)
    if bgrImg is None:
        return None
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
    return get_landmark(rgbImg)


def normalize(v):
    """
    Normalize a vector by its L1 norm.
    """
    norm = np.linalg.norm(v, ord=1)
    if norm == 0:
        norm = np.finfo(v.dtype).eps
    return v / norm

def getLandmarks(image_path):
    cwd = os.path.abspath(os.path.dirname(__file__))
    model_path = os.path.abspath(os.path.join(cwd,
                                              "D:/attentiveness-detection-master/gaze_tracking/trained_models/shape_predictor_68_face_landmarks.dat"))
    predictor = dlib.shape_predictor(model_path)
    face_detector = dlib.get_frontal_face_detector()
    frame = cv2.imread(image_path)
    faces = face_detector(frame)
    if len(faces) == 0:
        landmarks = None
    else:
        landmarks = predictor(frame, faces[0])

    return landmarks, frame


if __name__ == "__main__":
    cwd = os.path.abspath(os.path.dirname(__file__))
    model_path = os.path.abspath(os.path.join(cwd, "E:/aut/RD/project/attentiveness-detection-master/gaze_tracking/trained_models/shape_predictor_68_face_landmarks.dat"))
    predictor = dlib.shape_predictor(model_path)
    face_detector = dlib.get_frontal_face_detector()
    frame = cv2.imread("E:/aut/RD/project/attentiveness-detection-master/test_datasets/111.jpg")
    faces = face_detector(frame)
    landmarks = predictor(frame, faces[0])

    lips = Lips(landmarks)


Example #38
import argparse
import cv2
import dlib
import imutils
from utils.carve import rect_to_bb, shape_to_np

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-w",
                "--weights",
                help="path to facial landmark predictor",
                default="./weights/shape_predictor_68_face_landmarks.dat")
ap.add_argument("-i", "--image", required=True, help="path to input image")
args = vars(ap.parse_args())

# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["weights"])

# load the input image, resize it, and convert it to grayscale
image = cv2.imread(args["image"])
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# get the input image name
image_name = args["image"].split("/")[-1]

# detect faces in the grayscale image
rects = detector(gray, 1)

print(rects)

# loop over the face detections
Example #39
 def __init__(self, shape_predictor_config_file):
     self.predictor = dlib.shape_predictor(shape_predictor_config_file)
Example #40
# shape_predictor and where it should be according to the truth data.
print(("\nTraining accuracy: {}".format(
    dlib.test_shape_predictor(training_xml_path, "predictor.dat"))))
# The real test is to see how well it does on data it wasn't trained on.  We
# trained it on a very small dataset so the accuracy is not extremely high, but
# it's still doing quite good.  Moreover, if you train it on one of the large
# face landmarking datasets you will obtain state-of-the-art results, as shown
# in the Kazemi paper.
testing_xml_path = os.path.join(faces_folder, "testing_with_face_landmarks.xml")
print(("Testing accuracy: {}".format(
    dlib.test_shape_predictor(testing_xml_path, "predictor.dat"))))

# Now let's use it as you would in a normal application.  First we will load it
# from disk. We also need to load a face detector to provide the initial
# estimate of the facial location.
predictor = dlib.shape_predictor("predictor.dat")
detector = dlib.get_frontal_face_detector()

# Now let's run the detector and shape_predictor over the images in the faces
# folder and display the results.
print("Showing detections and predictions on the images in the faces folder...")
win = dlib.image_window()
for f in glob.glob(os.path.join(faces_folder, "*.jpg")):
    print(("Processing file: {}".format(f)))
    img = io.imread(f)

    win.clear_overlay()
    win.set_image(img)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
Example #41
#!/usr/bin/env python
# coding: utf-8

# In[ ]:


import cv2
import numpy as np
import dlib
 
webcam = False
cap = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("C:\\Users\\PREETI\\Desktop\\shape_predictor_68_face_landmarks.dat")
 
def empty(a):
    pass
cv2.namedWindow("BGR")
cv2.resizeWindow("BGR",640,240)
cv2.createTrackbar("Blue","BGR",153,255,empty)
cv2.createTrackbar("Green","BGR",0,255,empty)
cv2.createTrackbar("Red","BGR",137,255,empty)
 
def createBox(img, points, scale=5, masked=False, cropped=True):
    if masked:
        mask = np.zeros_like(img)
        mask = cv2.fillPoly(mask,[points],(255,255,255))
        img = cv2.bitwise_and(img,mask)
        #cv2.imshow('Mask',mask)
 
    if cropped:
Example #42
def cvloop(run_event):
    global panelA
    global SPRITES
    global image_path
    i = 0
    video_capture = cv2.VideoCapture(0)  #read from webcam
    (x, y, w, h) = (0, 0, 10, 10)  #whatever initial values

    #Filters path
    detector = dlib.get_frontal_face_detector()

    model = "data/shape_predictor_68_face_landmarks.dat"
    predictor = dlib.shape_predictor(
        model
    )  # link to model: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2

    while run_event.is_set():
        ret, image = video_capture.read()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = detector(gray, 0)

        for face in faces:
            (x, y, w, h) = (face.left(), face.top(), face.width(),
                            face.height())

            shape = predictor(gray, face)
            shape = face_utils.shape_to_np(shape)
            incl = calculate_inclination(
                shape[17], shape[26])  #inclination based on eyebrows

            # condition to see if mouth is open
            is_mouth_open = (shape[66][1] - shape[62][1]) >= 10  # y coordinates of lip landmarks

            if SPRITES[0]:

                apply_sprite(image, image_path, w, x, y + 40, incl, ontop=True)

            if SPRITES[1]:
                (x1, y1, w1, h1) = get_face_boundbox(shape, 6)
                apply_sprite(image, image_path, w1, x1, y1 + 275, incl)

            if SPRITES[3]:
                (x3, y3, _, h3) = get_face_boundbox(shape, 1)
                apply_sprite(image, image_path, w, x, y3, incl, ontop=False)

            (x0, y0, w0, h0) = get_face_boundbox(shape, 6)  #bound box of mouth

            if SPRITES[4]:
                (x3, y3, w3, h3) = get_face_boundbox(shape, 7)  #nose
                apply_sprite(image, image_path, w3, x3 - 20, y3 + 25, incl)
                (x3, y3, w3, h3) = get_face_boundbox(shape, 8)  #nose
                apply_sprite(image, image_path, w3, x3 + 20, y3 + 25, incl)

            if SPRITES[5]:
                findRects = []
                upperPath = "/home/admin1/Documents/Flipkart_Hackathon/BodyDetection/haarcascades_cuda/haarcascade_upperbody.xml"
                imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # fix: source frame is `image`
                upperCascade = cv2.CascadeClassifier(upperPath)
                upperRect = upperCascade.detectMultiScale(imageGray,
                                                          scaleFactor=1.1,
                                                          minNeighbors=1,
                                                          minSize=(1, 1))

                if len(upperRect) > 0:
                    findRects.append(upperRect[0])
                    print(findRects)

                for obj in findRects:
                    print(obj)
                    # img = cv2.rectangle(img, (obj[0],obj[1]), (obj[0]+obj[2], obj[1]+obj[3]), (0, 255, 0), 2)
                    draw_sprite(image, obj[0], obj[1])

        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(image)
        image = ImageTk.PhotoImage(image)
        panelA.configure(image=image)
        panelA.image = image

    video_capture.release()
Example #43
 def __init__(
     self,
     dlib68_faceshape_predictor_path="./resources/face_feature_extractors/dlib_68_point/shape_predictor_68_face_landmarks.dat"
 ):
     self.model = dlib.shape_predictor(dlib68_faceshape_predictor_path)
     self.tempfacedetect = dlib.get_frontal_face_detector()
Example #44
def eye_aspect_ratio(eye):
    # (reconstructed head of this truncated snippet) the standard eye aspect
    # ratio: mean of the two vertical eye distances over the horizontal one
    A = np.linalg.norm(eye[1] - eye[5])
    B = np.linalg.norm(eye[2] - eye[4])
    C = np.linalg.norm(eye[0] - eye[3])
    ear = (A + B) / (2.0 * C)
    return ear


a = "C:/Users/HP/Desktop/alarm_beep.mp3"
s = "C:/Users/HP/Downloads/shape_predictor_68_face_landmarks.dat"
w = 0

EYE_AR_THRESH = 0.20
EYE_AR_CONSEC_FRAMES = 48

COUNTER = 0
ALARM_ON = False

print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(s)

(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

print("[INFO] starting video stream thread...")
vs = VideoStream(src=w).start()
time.sleep(1.0)

while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    rects = detector(gray, 0)
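    # (sketch) the capture ends here; the canonical continuation of this
    # Rosebrock-style drowsiness detector computes the EAR per face and
    # drives the COUNTER/ALARM_ON state defined above
    for rect in rects:
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        leftEAR = eye_aspect_ratio(shape[lStart:lEnd])
        rightEAR = eye_aspect_ratio(shape[rStart:rEnd])
        ear = (leftEAR + rightEAR) / 2.0
        if ear < EYE_AR_THRESH:
            COUNTER += 1
            if COUNTER >= EYE_AR_CONSEC_FRAMES and not ALARM_ON:
                ALARM_ON = True  # e.g. play the alarm mp3 at path `a`
        else:
            COUNTER = 0
            ALARM_ON = False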
Example #45
 def __init__(self):
     self.detector = dlib.get_frontal_face_detector()
     self.predictor = dlib.shape_predictor("trained_model/land.dat")
Example #46
import os
import glob
import _pickle as cPickle
import dlib
import cv2
import numpy as np
import shutil
import sys

pula_quadros = 10
cap = cv2.VideoCapture(0)
cont_quadros = 0
limiar = 0.3
detectorFaces = dlib.get_frontal_face_detector()
detectorPontosFaciais = dlib.shape_predictor(
    'Recursos/shape_predictor_68_face_landmarks.dat')
reconhecimentoFacial = dlib.face_recognition_model_v1(
    'Recursos/dlib_face_recognition_resnet_model_v1.dat')
indices = np.load("Recursos/descritores_Alunos_Uniube.pickle",
                  allow_pickle=True)
descritoresFaciais = np.load("Recursos/descritores_Alunos_Uniube.npy")
while cap.isOpened():
    ret, frame = cap.read()
    cont_quadros += 1
    if (cont_quadros % pula_quadros == 0):
        facesDetectadas = detectorFaces(frame, 1)
        for faces in facesDetectadas:
            e, t, d, b = (int(faces.left()), int(faces.top()),
                          int(faces.right()), int(faces.bottom()))

            pontosFaciais = detectorPontosFaciais(frame, faces)
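            # (sketch) the capture ends here; the usual continuation computes
            # the face descriptor and accepts the nearest stored descriptor
            # under `limiar` (the indices->name mapping is an assumption)
            descritor = reconhecimentoFacial.compute_face_descriptor(frame, pontosFaciais)
            descritor = np.asarray(list(descritor), dtype=np.float64)[np.newaxis, :]
            distancias = np.linalg.norm(descritoresFaciais - descritor, axis=1)
            minimo = np.argmin(distancias)
            if distancias[minimo] <= limiar:
                nome = os.path.split(indices[minimo])[1].split(".")[0]
            else:
                nome = "unknown"
            cv2.rectangle(frame, (e, t), (d, b), (0, 255, 0), 2)
            cv2.putText(frame, nome, (e, t - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)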
Example #47

def writeLandmarksToFile(landmarks, landmarksFileName):
    with open(landmarksFileName, 'w') as f:
        for p in landmarks.parts():
            f.write("%s %s\n" % (int(p.x), int(p.y)))



# Landmark model location
PREDICTOR_PATH = "../../common/shape_predictor_68_face_landmarks.dat"
# Get the face detector
faceDetector = dlib.get_frontal_face_detector()
# The landmark detector is implemented in the shape_predictor class
landmarkDetector = dlib.shape_predictor(PREDICTOR_PATH)

# Read image
imageFilename = "../data/images/family.jpg"
im = cv2.imread(imageFilename)
imDlib = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
# landmarks will be stored in results/family_i.txt
landmarksBasename = "results/family"

# Detect faces in the image
faceRects = faceDetector(imDlib, 0)
print("Number of faces detected: ", len(faceRects))

# List to store landmarks of all detected faces
landmarksAll = []
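The capture stops before the loop that uses these; a sketch of the presumable continuation, filling landmarksAll and writing one results/family_i.txt per face via writeLandmarksToFile:

for i, faceRect in enumerate(faceRects):
    # detect landmarks for each detected face rectangle
    landmarks = landmarkDetector(imDlib, faceRect)
    landmarksAll.append(landmarks)
    # writes results/family_0.txt, results/family_1.txt, ...
    writeLandmarksToFile(landmarks, landmarksBasename + "_" + str(i) + ".txt")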
Example #48
def generate_face_correspondences(theImage1, theImage2):
    # Detect the points of face.
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        'code/utils/shape_predictor_68_face_landmarks.dat')
    corresp = np.zeros((68, 2))

    imgList = crop_image(theImage1, theImage2)
    list1 = []
    list2 = []
    j = 1

    for img in imgList:

        size = (img.shape[0], img.shape[1])
        if (j == 1):
            currList = list1
        else:
            currList = list2

        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.

        dets = detector(img, 1)

        try:
            if len(dets) == 0:
                raise NoFaceFound
        except NoFaceFound:
            print("Sorry, but I couldn't find a face in the image.")

        j = j + 1

        for k, rect in enumerate(dets):

            # Get the landmarks/parts for the face in rect.
            shape = predictor(img, rect)
            # corresp = face_utils.shape_to_np(shape)

            for i in range(0, 68):
                x = shape.part(i).x
                y = shape.part(i).y
                currList.append((x, y))
                corresp[i][0] += x
                corresp[i][1] += y
                # cv2.circle(img, (x, y), 2, (0, 255, 0), 2)

            # Add back the background
            currList.append((1, 1))
            currList.append((size[1] - 1, 1))
            currList.append(((size[1] - 1) // 2, 1))
            currList.append((1, size[0] - 1))
            currList.append((1, (size[0] - 1) // 2))
            currList.append(((size[1] - 1) // 2, size[0] - 1))
            currList.append((size[1] - 1, size[0] - 1))
            currList.append(((size[1] - 1), (size[0] - 1) // 2))

    # Add back the background
    narray = corresp / 2
    narray = np.append(narray, [[1, 1]], axis=0)
    narray = np.append(narray, [[size[1] - 1, 1]], axis=0)
    narray = np.append(narray, [[(size[1] - 1) // 2, 1]], axis=0)
    narray = np.append(narray, [[1, size[0] - 1]], axis=0)
    narray = np.append(narray, [[1, (size[0] - 1) // 2]], axis=0)
    narray = np.append(narray, [[(size[1] - 1) // 2, size[0] - 1]], axis=0)
    narray = np.append(narray, [[size[1] - 1, size[0] - 1]], axis=0)
    narray = np.append(narray, [[(size[1] - 1), (size[0] - 1) // 2]], axis=0)

    return [size, imgList[0], imgList[1], list1, list2, narray]
Example #49
video_capture = cv2.VideoCapture(0)

frozen_graph_filename = 'model/train_model.pb'

with gfile.FastGFile(frozen_graph_filename, "rb") as f:
    graph_def = tf.GraphDef()
    byte = f.read()
    graph_def.ParseFromString(byte)

tf.import_graph_def(graph_def, name='')

# for node in graph_def.node:
#     print(node.name)

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("dlib_pretrained_model.dat")

with tf.Session() as sess:
    detection_graph = tf.get_default_graph()
    input_tensor = detection_graph.get_tensor_by_name('input_tensor:0')
    output_tensor = detection_graph.get_tensor_by_name('output_tensor:0')
    output = detection_graph.get_tensor_by_name('output:0')
    result = detection_graph.get_tensor_by_name('result:0')

    while True:
        ret, frame = video_capture.read()
        rects = detector(frame, 1)

        for rect in rects:
            (x, y, w, h) = face_utils.rect_to_bb(rect)
            face = frame[y - 50: y + h + 10, x - 10: x + w + 20]
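            # (sketch) the capture ends here; presumably the crop is resized,
            # grayscaled, and fed through the frozen graph. The 48x48 input
            # size and fetching `result` are assumptions, not from the source.
            face_gray = cv2.cvtColor(cv2.resize(face, (48, 48)), cv2.COLOR_BGR2GRAY)
            pred = sess.run(result, feed_dict={input_tensor: [face_gray.reshape(-1)]})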
Example #50
ftp = ftplib.FTP()
ftp.connect("112.175.184.82", 21)
ftp.login("sormdi11", "tidlsl1254!")

#ftp unknown
ftpun = ftplib.FTP()
ftpun.connect("112.175.184.82", 21)
ftpun.login("sormdi11", "tidlsl1254!")

#ftp open
ftpop = ftplib.FTP()
ftpop.connect("112.175.184.82", 21)
ftpop.login("sormdi11", "tidlsl1254!")

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(
    'C:/Users/haneu/fin_pro/modle/shape_predictor_68_face_landmarks.dat')
face_recog = dlib.face_recognition_model_v1(
    'C:/Users/haneu/fin_pro/modle/dlib_face_recognition_resnet_model_v1.dat')

# Load the precomputed face encodings
# print("20 encodings")
#descs = np.load('C:/Users/haneu/fin_pro/a_project/a5_descs.npy', allow_pickle=True)[()]
#descs = np.load('C:/Users/haneu/fin_pro/a_project/a10_descs.npy', allow_pickle=True)[()]
descs = np.load('C:/Users/haneu/fin_pro/a_project/a20_descs.npy',
                allow_pickle=True)[()]


# Find faces in the incoming video frame
def encode_faces(image):

    faces = detector(image, 1)
Example #51
import pygame.camera, pygame.image
import dlib, cv2, time, utils, mmap, sys, json
import posix_ipc
from PIL import Image
import numpy as np

scale_x = utils.SCALE_DOWN
upsample_num = 1

detector = dlib.get_frontal_face_detector()
predictor_path = "shape_predictor_68_face_landmarks.dat"
predictor = dlib.shape_predictor(predictor_path)

focused_mod = False
focused_recs = []
focused_rec_delta = 0.2

use_Focus = True

temp_img_file_bmp = "photo.bmp"
temp_img_file_jpeg = "photo_jpg"


def start_camera():
    cap = cv2.VideoCapture(1)
    return cap


def get_image_from_camera(cam):
    ret, frame = cam.read()
    return frame  # the capture cuts off here; returning the frame is the evident intent
Example #52
print("starting program.")
print("'s' starts drawing eyes.")
print("'r' to toggle recording image, and 'q' to quit")

cam = VideoStream()
cam.stream.stream.set(3, 1280)
cam.stream.stream.set(4, 720)
vs = cam.start()

time.sleep(.5)

# this detects our face
detector = dlib.get_frontal_face_detector()
# and this predicts our face's orientation
predictor = dlib.shape_predictor(args.predictor)

recording = False
counter = 0


class EyeList(object):
    def __init__(self, length):
        self.length = length
        self.eyes = []

    def push(self, newcoords):
        if len(self.eyes) < self.length:
            self.eyes.append(newcoords)
        else:
            self.eyes.pop(0)
Example #53
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))

ALIGN_POINTS = (LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS +
                RIGHT_BROW_POINTS + MOUTH_POINTS + NOSE_POINTS)

OVERLAY_POINTS = (LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS +
                  RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)

# Path to shape predictor file
DLIB_PATH = 'shape_predictor_68_face_landmarks.dat'
FACE_CASCADE = 'cascades/haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(FACE_CASCADE)

# Our landpoints' predictor and detector objects
predictor = dlib.shape_predictor(DLIB_PATH)
detector = dlib.get_frontal_face_detector(
)  ##  returns a list of rectangles, each of which corresponding with a face in the image.


# Defining classes for some exception
class TooManyFaces(Exception):
    pass


class NoFaces(Exception):
    pass


# Detect landpoints' on input image
def get_landmarks(image, use_dlib):
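    # (sketch) the body is cut off in this capture; a dlib-path body consistent
    # with the predictor, detector and exceptions defined above might be
    # (ignoring the Haar-cascade branch that use_dlib presumably selects):
    rects = detector(image, 1)
    if len(rects) > 1:
        raise TooManyFaces
    if len(rects) == 0:
        raise NoFaces
    shape = predictor(image, rects[0])
    return [(p.x, p.y) for p in shape.parts()]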
Example #54
 def initialize(self):
     if self.model == FaceLivenessDetectorModels.EYEBLINKING:
         # note: this loads a shape predictor, despite the attribute name
         self.detector = dlib.shape_predictor(
             self.path + 'shape_predictor_68_face_landmarks.dat')
Example #55
def facial_landmarks():
	predictor = dlib.shape_predictor('Cascades/dlibcascades/shape_predictor_68_face_landmarks.dat')
	return predictor
Example #56
def proc_algorithm(q_video2algo, q_algo2video):
    fp = open('.\\logs\\log_algorithm_thread.txt', 'w')
    #    logging.debug('Process(%s) is reading...' % os.getpid())
    fp.write('Process(%s) is reading...\n' % os.getpid())
    fp.flush()

    predictor_path = FACE_LANDMARK_FILE
    predictor = dlib.shape_predictor(predictor_path)
    detector = dlib.get_frontal_face_detector()
    fp.write(
        "Showing detections and predictions on the images in the faces folder...\n"
    )

    width = RESIZED_WIDTH
    height = RESIZED_HEIGHT
    classes = CLASSES_NUMBER

    tf.reset_default_graph()
    ##################################################################################################################
    # CNN model below; do not touch it. Make sure this model is identical to the training model.
    # The same piece of code appears in training and validation so that save/load works;
    # there are ways to avoid duplicating the code, but they are not implemented here.
    # The CNN model starts here:
    ##################################################################################################################
    input_x = tf.placeholder(tf.float32, [None, width * height]) / 255.
    output_y = tf.placeholder(tf.int32, [None, classes])
    input_x_images = tf.reshape(input_x, [-1, width, height, 1])

    conv1 = tf.layers.conv2d(inputs=input_x_images,
                             filters=32,
                             kernel_size=[5, 5],
                             strides=1,
                             padding='same',
                             activation=tf.nn.relu)
    print(conv1)

    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    print(pool1)

    conv2 = tf.layers.conv2d(inputs=pool1,
                             filters=64,
                             kernel_size=[5, 5],
                             strides=1,
                             padding='same',
                             activation=tf.nn.relu)

    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    w0 = int(width / 4)
    h0 = int(height / 4)
    flat = tf.reshape(pool2, [-1, w0 * h0 * 64])

    dense = tf.layers.dense(inputs=flat, units=1024, activation=tf.nn.relu)
    print(dense)

    dropout = tf.layers.dropout(inputs=dense, rate=0.5)
    print(dropout)

    logits = tf.layers.dense(inputs=dropout, units=classes)
    print(logits)
    ##################################################################################################################
    #CNN Model end here:
    ##################################################################################################################

    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('.\\models'))

    face_list = []
    while True:

        if q_video2algo.empty():
            continue

        # q_video2algo is not empty
        frame_index, frame = q_video2algo.get(True)

        if (frame_index == QUIT_THREAD_FLAG):
            break
        #Copy original frame
        frame_without_label = frame.copy()

        img2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        dets = detector(img2, 1)
        face_number = len(dets)
        fp.write("Info: Frame %d received, %d face(s) detected.\n" %
                 (frame_index, face_number))

        #No face detected;
        if (face_number == 0):
            continue

        #Some faces are detected
        face_list = plot_labels_on_frame(frame, dets, predictor)
        crop_image = []
        for single_face in face_list:
            fp.write(
                "Info: [top_left_x, top_left_y, bottom_right_x, bottom_right_y] = [%d, %d, %d, %d]\n"
                % (single_face[0], single_face[1], single_face[2],
                   single_face[3]))
            crop_image = frame_without_label[single_face[1]:single_face[3],
                                             single_face[0]:single_face[2]]
            resized_crop_image = cv2.resize(crop_image, (width, height),
                                            interpolation=cv2.INTER_CUBIC)
            face_saved_file = '.\\logs\\face_frame{}.jpg'.format(frame_index)
            cv2.imwrite(face_saved_file, resized_crop_image)

            #Align image and use tensorflow to do evaluation;
            resized_crop_gray = cv2.cvtColor(resized_crop_image,
                                             cv2.COLOR_BGR2GRAY)
            resized_crop_1d = np.array(resized_crop_gray).reshape(width *
                                                                  height)
            test_x = []
            test_x.append(resized_crop_1d)
            test_x.append(resized_crop_1d)
            test_output = sess.run(logits, {input_x: test_x[0:1]})
            inferenced_y = np.argmax(test_output, 1)
            fp.write('Info: Inferenced face %d\n' %
                     inferenced_y[0])  #Reconized face index
            cv2.putText(frame, 's%d' % (inferenced_y[0] + 1),
                        (single_face[0], single_face[1]),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 255, 255),
                        FACE_TEXT_THICKNESS)

        # Send the labeled frame back to the video thread for display
        if q_algo2video.empty():
            q_algo2video.put((frame_index, frame))

    sess.close()
    fp.flush()
    fp.close()
Example #57
# -*- coding:utf-8 -*-
'''
@time: 2016/11/18 18:45
@author: Silence
'''
import cv2
import dlib
import numpy
import glob

PREDICTOR_PATH = "D:\py2.7.12\Lib\site-packages\dlib-19.2.0-py2.7-win-amd64.egg\shape_predictor_68_face_landmarks.dat"
SCALE_FACTOR = 1
FEATURE_AMOUNT = 11

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)


class NoFaces(Exception):
    pass


def get_landmarks(im):
    '''
    Obtain the list of landmark points.
    :param im: an input image
    :return: a 68x2 list of points
    '''
    rects = detector(im, 1)
    print("Number of faces: {}".format(len(rects)))
    if len(rects) > 1:
Example #58
# Based on Adrian Rosebrock's article
# https://bit.ly/3ht4dbG

# import the libraries
import cv2
import dlib
import time
import imutils
from imutils.video import VideoStream
from imutils import face_utils

# dlib detector
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
vs = VideoStream(src=1).start()
time.sleep(2.0)

# video processing pipeline
while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=600)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 0)

    for rect in rects:
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        for (x, y) in shape:
            cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)

        cv2.imshow("Frame", frame)
Example #59
ap.add_argument("-p",
                "--shape-predictor",
                required=True,
                help="path to facial landmark predictor")
# ap.add_argument("-v", "--video", type = str, default = "", help = "path to input video file")
args = vars(ap.parse_args())

EYE_AR_THRESH = 0.21
EYE_AR_CONSEC_FRAMES = 3

COUNTER = 0
TOTAL = 0

print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

print("[INFO] starting video stream thread...")

# vs = FileVideoStream(args["video"]).start()
# fileStream = True

vs = VideoStream(src=0).start()

fileStream = False

time.sleep(1.0)
Example #60
def get_face(imgpath):
    # Main entry point; calling this function is all that is needed

    # record the detection time
    start_time = time.time()

    # Dlib detector and predictor
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        './model/shape_predictor_68_face_landmarks.dat')
    # read the image file
    img_rd = cv2.imdecode(np.fromfile(imgpath, dtype=np.uint8), -1)
    # Alternative read with cv2.imread:
    # if it raises "RuntimeError: Unsupported image type, must be 8bit gray or RGB image.",
    # comment it out and use the cv2.imdecode call above
    # img_rd = cv2.imread(imgpath)
    img_gray = cv2.cvtColor(img_rd, cv2.COLOR_RGB2GRAY)

    # number of faces
    faces = detector(img_gray, 0)

    # set the cv2 font
    font = cv2.FONT_HERSHEY_SIMPLEX
    # per-face Euler-angle strings
    face_euler_angle_arr = []

    # mark the landmark points
    if len(faces) != 0:
        # faces detected
        for i in range(len(faces)):
            print("\n\nProcessing face", i + 1)
            # take the landmark coordinates
            # Getting all 68 points is kept only as a convenient starting point
            # for future rework; the computation below relies on 6 points
            # landmarks = np.matrix([[p.x, p.y] for p in predictor(img_rd, faces[i]).parts()])
            # get only the required points
            landmarks = get_image_points_from_landmark_shape(
                predictor(img_rd, faces[i]))

            # get the rotation and translation vectors
            ret, rotation_vector, translation_vector, camera_matrix, dist_coeffs = get_pose_estimation(
                img_rd.shape, landmarks)
            if ret != True:
                print('get_pose_estimation failed')
                continue

            # convert the rotation vector to Euler angles
            ret, pitch, yaw, roll = get_euler_angle(rotation_vector)
            euler_angle_str = 'roll:{}, pitch:{}, yaw:{}'.format(
                roll, pitch, yaw)
            ## euler_angle_str = 'pitch:{}, yaw:{}, roll:{}'.format(pitch, yaw, roll)
            face_euler_angle_arr.append(euler_angle_str)
            # cv2.putText() args: image, text, position, font, scale, color, thickness, line type
            # cv2.putText(img_rd, euler_angle_str, (20, 80),
            #             font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
            print("Euler angles:\n {}".format(euler_angle_str))

            # build a 3D coordinate frame in the image:
            # draw a line of facial orientation starting at the nose tip
            (nose_end_point2D,
             jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                           rotation_vector, translation_vector,
                                           camera_matrix, dist_coeffs)

            p1 = (int(landmarks[0][0, 0]), int(landmarks[0][0, 1]))
            p2 = (int(nose_end_point2D[0][0][0]),
                  int(nose_end_point2D[0][0][1]))
            cv2.line(img_rd, p1, p2, (255, 0, 0), 2)
            # label each face with its index
            # img_rd = putText_chinese(img_rd, "Face " + str(i+1), (int(landmarks[0][0,0]),int(landmarks[0][0,1])), 20, (255, 0, 0))
            # iterate over all the landmark points and mark them
            for idx, point in enumerate(landmarks):

                # point coordinates
                pos = (int(point[0, 0]), int(point[0, 1]))

                # draw a circle at each landmark with cv2.circle
                cv2.circle(img_rd, pos, 10, color=(255, 0, 0))

                # write the landmark index with cv2.putText
                cv2.putText(img_rd, str(idx + 1), pos, font, 0.5, (0, 0, 255),
                            2, cv2.LINE_AA)
        # img_rd = putText_chinese(img_rd, "Faces: " + str(len(faces)), (20, 40), 20, (255, 0, 0))
    else:
        # no face detected
        # img_rd = putText_chinese(img_rd, "No face detected or face not fully visible", (20, 40), 20, (255, 0, 0))
        face_euler_angle_arr.append("No face detected or face not fully visible")
    print("\nDetection took {} seconds".format(round(time.time() - start_time, 3)))
    # cv2 images use BGR channel order, so flip to RGB
    return Image.fromarray(img_rd[..., ::-1]), len(faces), face_euler_angle_arr