Example #1
def test():
    detector = dlib.simple_object_detector("../boobs.svm")

    win_det = dlib.image_window()
    win_det.set_image(detector)
    # dlib.hit_enter_to_continue()

    print("Showing detections on the images in the faces folder...")
    win = dlib.image_window()

    tp = 0
    fn = 0
    dur = 0

    test_path = "../pics/test/*.jpg"
    # test_path = "/home/external/moderation-p**n-detector/boobs-oboobs/*.jpg"

    for f in glob.glob(test_path):
        try:
            img = io.imread(f)
        except IOError as e:
            print("Image {} can't be loaded: {}".format(f, e))
            continue

        try:
            t_start = time.perf_counter()
            dets = detector(img)
            t_end = time.perf_counter()
        except RuntimeError as e:
            print("Image {} can't be detected: {}".format(f, e))
            continue

        dur += t_end - t_start

        num = len(dets)
        if num > 0:
            print("{} boobs detected in file: {}".format(num, f))
            for k, d in enumerate(dets):
                print(
                    "Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                        k, d.left(), d.top(), d.right(), d.bottom()
                    )
                )

            win.clear_overlay()
            win.set_image(img)
            win.add_overlay(dets)
            # dlib.hit_enter_to_continue()

            tp += 1
        else:
            fn += 1

        if tp + fn > 100:
            break

    print("tp={} fn={} recall={} avg_dur={}".format(tp, fn, 1.0 * tp / (tp + fn), 1.0 * dur / (tp + fn)))
Example #2
 def __init__(self, visualize=True):
     self.detector = dlib.get_frontal_face_detector()
     self.predictor = dlib.shape_predictor(predictor_path)
     self.fronter = getDefaultFrontalizer()
     if visualize:
         self.win = dlib.image_window()
         self.win2 = dlib.image_window()
     else:
         self.win = self.win2 = None
Example #3
def classify(img):
    detector = dlib.simple_object_detector("cupdetector_2.svm")

    win_det = dlib.image_window()
    win_det.set_image(detector)

    win = dlib.image_window()
    test_dir = '/home/jyotiska/Dropbox/Computer Vision/Cups_test'
    convert_dir = '/home/jyotiska/Dropbox/Computer Vision/Cups_test_convert'
    assorted_dir = '/home/jyotiska/Dropbox/Computer Vision/Item bucket'

    items = os.listdir(assorted_dir)

    convert_i = 0
    for f in glob.glob(convert_dir+"/*.*"):
        print("processing file:", f)
        img = io.imread(f)
        extension = f.split(".")[1]
        convert_file = "convert_" + str(convert_i) + "." + extension
        shutil.copy(f, convert_file)
        print("convert file:", convert_file)
        background = Image.open(convert_file)
        dets = detector(img)

        print("number of cups detected:", len(dets))
        for d in dets:
            x = d.left()
            y = d.top()
            width = d.right() - x
            height = d.bottom() - y
            print("  detection position left,top,right,bottom:", d.left(), d.top(), d.right(), d.bottom())
            print(width, height)

            r = random.randint(0, len(items) - 1)
            print(r, items[r])
            random_item = Image.open(assorted_dir+"/"+items[r])
            # scale it a bit more, and adjust position

            # Apply blur?
            resized = random_item.resize( (int(1.2*width),int(1.2*height)) )
            background.paste(resized, (d.left()-12,d.top()-10), resized)

        background.show()
        background.save(convert_file)
        win.clear_overlay()
        win.set_image(img)
        win.add_overlay(dets)
        convert_i += 1
Example #4
def camera_fd():
    import cv2

    cam = cv2.VideoCapture(-1)

    win = dlib.image_window()
    detector = dlib.get_frontal_face_detector()

    def detect():
        s, img = cam.read()
        if not s:
            return

        img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        dets = detector(img2)
        num = len(dets)

        if num > 0:
            print("found!")

        win.clear_overlay()
        win.set_image(img2)
        win.add_overlay(dets)

    try:
        while True:
            detect()
    except KeyboardInterrupt:
        print("exiting")

    cam.release()
    cv2.destroyAllWindows()
Example #5
def camera_boobs():
    import cv2

    # cam = cv2.VideoCapture(-1)
    cam = cv2.VideoCapture("/home/nick/temp/boobs/b1.flv")

    win = dlib.image_window()
    detector = dlib.simple_object_detector("detector.svm")

    def detect():
        s, img = cam.read()
        if not s:
            return

        img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        dets = detector(img2)
        num = len(dets)

        if num > 0:
            print("found!")

        win.clear_overlay()
        win.set_image(img2)
        win.add_overlay(dets)

    try:
        while True:
            detect()
    except KeyboardInterrupt:
        print("exiting")

    cam.release()
    cv2.destroyAllWindows()
Example #6
def main():
    if len(sys.argv) < 3:
        print(
            "Give the path to the examples/faces directory as the argument to this "
            "program. For example, if you are in the python_examples folder then "
            "execute this program by running:\n"
            "    ./detect.py ../examples/faces ./detector.svm [-s]")
        exit()

    f = sys.argv[1]
    detector_fp = sys.argv[2]

    s = False
    if len(sys.argv) > 3 and sys.argv[3] == '-s':
        s = True

    dets = handle_locations(f, detector_fp)
    print("Number of detections: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

    if s:
        win = dlib.image_window()
        win.clear_overlay()
        win.set_image(img)  # NOTE: img is not defined in this snippet; handle_locations would need to return the loaded image
        win.add_overlay(dets)
        dlib.hit_enter_to_continue()
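The handle_locations helper is not defined in this snippet. A minimal sketch of what it might look like, assuming it loads one image and runs the trained detector over it; note that main() also references img, which this sketch does not return:

import dlib
from skimage import io

def handle_locations(image_path, detector_path):
    # Hypothetical helper, reconstructed from the call site: load the trained
    # SVM detector, read the image, and return the detected rectangles.
    detector = dlib.simple_object_detector(detector_path)
    img = io.imread(image_path)
    return detector(img)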
Example #7
 def __init__(self):
     self._detector = dlib.get_frontal_face_detector()
     pose_predictor_path = '../models/dlib/shape_predictor_68_face_landmarks.dat'
     self._predictor = dlib.shape_predictor(pose_predictor_path)
     if not self.SERVER_NO_GUI_MODE:
         self._win = dlib.image_window()
     self._lock = RLock()
Example #8
def show_pair(path1, path2):
    global win1, win2
    if win1 is None:
        win1 = dlib.image_window()
    if win2 is None:
        win2 = dlib.image_window()
    img1 = io.imread(path1)
    img2 = io.imread(path2)
    win1.clear_overlay()
    win2.clear_overlay()
    win1.set_image(img1)
    win2.set_image(img2)
    dlib.hit_enter_to_continue()

Example #9
def display_landmarks(img, dets, shapes):
    win = dlib.image_window()
    win.clear_overlay()
    win.set_image(img)
    for shape in shapes:
        win.add_overlay(shape)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()
Example #10
def main():
    detector = LandmarkDetector()
    img = cv2.imread("../data/test_data/multi_processed.jpg")  # BGR order; colors will appear swapped in the dlib window
    shape = detector.get_landmarks(img)
    win = dlib.image_window()
    win.set_image(img)
    win.add_overlay(shape)

    print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                  shape.part(1)))
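The LandmarkDetector class used above is not defined in this snippet. A minimal sketch of a wrapper matching the call sites, assuming a 68-point predictor file (the path is a placeholder):

import dlib

class LandmarkDetector:
    def __init__(self, predictor_path="shape_predictor_68_face_landmarks.dat"):
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(predictor_path)

    def get_landmarks(self, img):
        # Return the landmark shape of the first detected face, or None.
        dets = self.detector(img, 1)
        return self.predictor(img, dets[0]) if dets else None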
Example #11
    def show_learned_hog_filter(self):
        # Now let's use the detector as you would in a normal application.  First we
        # will load it from disk.
        #print "Loading detector"

        #detector = dlib.simple_object_detector("../data/models/detector.svm")

        # We can look at the HOG filter we learned.  It should look like a face.  Neat!
        win_det = dlib.image_window()
        print("MTB filter")
        win_det.set_image(self.detectors[0])
        dlib.hit_enter_to_continue()
        print("PED filter")
        win_det.set_image(self.detectors[1])
        dlib.hit_enter_to_continue()
Example #12
def DetectFaceLandmarksInList(frameList, faceDetector = None, shapePredictor = None, skipLength = 2, debug = False):
	'''
	Given a frame list, detect (track) the faces
	Returns details of the facial landmarks for each frame
	'''
	if ((faceDetector is None) or (shapePredictor is None)):
		predictorPath = 'coreData/shape_predictor_68_face_landmarks.dat'
		faceDetector = dlib.get_frontal_face_detector()
		shapePredictor = dlib.shape_predictor(predictorPath)

	if (debug):
		win = dlib.image_window()
		win.clear_overlay()

	faceList = []

	for i in range(0, frameList.shape[0], skipLength):
		frame = frameList[i]
		dets = faceDetector(frame, 1)

		if debug:
			win.clear_overlay()
			win.set_image(frame)
			print("Number of faces detected: {}".format(len(dets)))
			for k, d in enumerate(dets):
				print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(k, d.left(), d.top(), d.right(), d.bottom()))
				shape = shapePredictor(frame, d)
				win.add_overlay(shape)
			win.add_overlay(dets)

		dets = list(enumerate(dets))
		faceNum = len(dets)

		if (faceNum == 1):
			shape = shapePredictor(frame, dets[0][1])
			# faceShape = NormalizeShape(shape, dets[0][1])
			faceShape = RawShape(shape, dets[0][1])
			faceList.append(faceShape)

	faceList = np.array(faceList)
	return faceList
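RawShape (and the commented-out NormalizeShape) are not shown. A minimal sketch of a plausible RawShape, assuming it collects the 68 landmark coordinates relative to the detection box:

import numpy as np

def RawShape(shape, det):
    # Hypothetical stand-in: express each landmark in face-local coordinates
    # by subtracting the detection box origin.
    return np.array([[p.x - det.left(), p.y - det.top()] for p in shape.parts()])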
Example #13
def run(predictor_path, faces_folder_path):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)
    win = dlib.image_window()

    for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
        print("Processing file: {}".format(f))
        img = io.imread(f)

        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(img, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))
            # Get the landmarks/parts for the face in box d.
            shape = predictor(img, d)

            print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                      shape.part(1)))
            left_eye_idxs = range(42, 48)
            left_eye_pts = np.array([[p.x, p.y] for i, p in enumerate(shape.parts()) if i in left_eye_idxs])
            left_eye = np.mean(left_eye_pts, axis=0)
            right_eye_idxs = range(36, 42)
            right_eye_pts = np.array([[p.x, p.y] for i, p in enumerate(shape.parts()) if i in right_eye_idxs])
            right_eye = np.mean(right_eye_pts, axis=0)

            cv2.circle(img, tuple(left_eye.astype(int)), 3, color=(0, 255, 255))
            cv2.circle(img, tuple(right_eye.astype(int)), 3, color=(0, 255, 255))
            # Draw the face landmarks on the screen.
            win.add_overlay(shape)


        win.set_image(img)
        win.add_overlay(dets)
        # img is RGB here (skimage); cv2.imwrite expects BGR, so saved colors are swapped
        base, ext = os.path.splitext(os.path.basename(f))
        cv2.imwrite(os.path.join(faces_folder_path, base + '_landmarks' + ext), img)
        dlib.hit_enter_to_continue()
        win.clear_overlay()
Example #14
    def __init__(self):
        #initializes frame
        self.frame = None

        #initializes ros node for face detect, publishes to face location
        rospy.init_node('face_detect', anonymous=True)
        self.pub = rospy.Publisher('/face_location', String, queue_size=10)

        #defines file paths
        rospack = rospkg.RosPack()
        PACKAGE_PATH = rospack.get_path("edwin")
        self.predictor_path = PACKAGE_PATH + '/params/shape_predictor_68_face_landmarks.dat'
        self.faces_folder_path = PACKAGE_PATH + '/params'

        #def of attributes
        self.detect = True
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(self.predictor_path)
        self.window = dlib.image_window()

        #CvBridge to usb_cam, subscribes to usb cam
        self.bridge = CvBridge()
        rospy.Subscriber("usb_cam/image_raw", Image, self.img_callback)
Example #15
def main():
    videofile = sys.argv[1]
    print('[INFO] Reading %s' % videofile )
    vid = imageio.get_reader( videofile )
    nFrames = 0
    frames = []
    for i in range(1000, 3000):
        img = vid.get_data( i )
        frames.append( img )
        nFrames += 1
    print('[INFO] Loaded all frames' )

    bbox_ = get_rectangle( frames[0] )
    # Create the correlation tracker - the object needs to be initialized
    # before it can be used
    tracker = dlib.correlation_tracker()

    win = dlib.image_window()
    # We will track the frames as we load them off of disk

    for k, img in enumerate(frames):
        print("Processing Frame {}".format(k))
        # We need to initialize the tracker on the first frame
        if k == 0:
            # Start a track on the juice box. If you look at the first frame you
            # will see that the juice box is contained within the bounding
            # box (74, 67, 112, 153).
            (x0, y0), (x1, y1) = bbox_
            tracker.start_track(img, dlib.rectangle( x0, y0, x1, y1 ))
        else:
            # Else we just attempt to track from the previous frame
            tracker.update(img)

        win.clear_overlay()
        win.set_image(img)
        win.add_overlay(tracker.get_position())
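get_rectangle is not defined in this snippet. A minimal sketch using OpenCV's interactive ROI selector, assuming the ((x0, y0), (x1, y1)) corner format that main() unpacks:

import cv2

def get_rectangle(frame):
    # Let the user draw the initial bounding box, then convert the
    # (x, y, w, h) tuple from selectROI into two corner points.
    x, y, w, h = cv2.selectROI("select object", frame)
    cv2.destroyWindow("select object")
    return (x, y), (x + w, y + h)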
Example #16
    def onVideo(self):
        motionChange = False
        predictor_path = "/home/lee/Software/test/shape_predictor_68_face_landmarks.dat"
        face_rec_model_path = "/home/lee/Software/test/dlib_face_recognition_resnet_model_v1.dat"
        faces_folder_path = "/home/lee/Software/pycharm_test/candidate-faces"
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor(predictor_path)
        facerec = dlib.face_recognition_model_v1(face_rec_model_path)
        descriptors = []

        for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
            print("Processing file: {}".format(f))
            img = io.imread(f)
            # win.clear_overlay()
            # win.set_image(img)

            dets = detector(img, 1)
            print("Number of faces detected: {}".format(len(dets)))

            for k, d in enumerate(dets):
                shape = predictor(img, d)
                # win.clear_overlay()
                # win.add_overlay(d)
                # win.add_overlay(shape)

                face_descriptor = facerec.compute_face_descriptor(img, shape)
                # print ("face_descriptor: {}".format(face_descriptor))

                v = numpy.array(face_descriptor)
                # print ("v: {}".format(v))
                descriptors.append(v)
                # print ("descriptors: {}".format(descriptors))

        win = dlib.image_window()
        cap = cv2.VideoCapture(0)
        c = 1
        time = 10
        while cap.isOpened():

            ret, cv_img = cap.read()
            if cv_img is None:
                break

            image = cv2.cvtColor(cv_img, cv2.COLOR_RGB2BGR)
            win.clear_overlay()
            win.set_image(image)
            dets = detector(image, 0)
            dist = []
            # print("Number of faces detected: {}".format(len(dets)))
            # lists = [[0 for k in range(4)] for j in range(10)]
            if (c % time == 0):
                for k, d in enumerate(dets):
                    # print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                    #     i, d.left(), d.top(), d.right(), d.bottom()))

                    shape1 = predictor(image, d)
                    # print("Part 0: {}, Part 1: {}, Part 2: {} ...".format(shape.part(0),  shape.part(1), shape.part(2)))
                    win.clear_overlay()
                    win.add_overlay(d)
                    win.add_overlay(shape1)
                    face_descriptor1 = facerec.compute_face_descriptor(
                        image, shape1)
                    # print ("face_descriptor1: {}".format(face_descriptor1))

                    d_test = numpy.array(face_descriptor1)
                    # print ("d_test: {}".format(d_test))

                    for i in descriptors:
                        dist_ = numpy.linalg.norm(i - d_test)
                        dist.append(dist_)
                    # print("dist: {}".format(dist))
                    candidate = [
                        '张丽梅', '丁高兴', '徐科杰', '金莹莹', '李政英', '石光耀', '陈美利', '段宇乐',
                        '李宗辉', '陈杰'
                    ]
                    c_d = dict(zip(candidate, dist))
                    cd_sorted = sorted(c_d.items(), key=lambda item: item[1])
                    # print (cd_sorted)
                    if ((cd_sorted[1][1] - cd_sorted[0][1]) < 0.1 and
                            cd_sorted[0][1] > 0.381):
                        print("\n This person is: a stranger")
                        j = -1
                        x = -1
                    else:
                        print("\n This person is:", cd_sorted[0][0])

                        if (cd_sorted[0][0] == '李政英'):
                            j = d.bottom() - d.top()
                            x = d.left()
                        elif (cd_sorted[0][0] == '陈美利'):
                            j = d.bottom() - d.top()
                            x = d.left()
                        else:
                            j = -1
                            x = -1

                if len(dets) == 0:
                    self.callbackKeyLeft = False
                    self.callbackKeyRight = False
                    self.callbackKeyDown = False
                    self.callbackKeyUp = False
                    motionChange = True
                else:
                    if x > 300:
                        self.callbackKeyRight = True
                        motionChange = True
                    elif 0 < x < 180:
                        self.callbackKeyLeft = True
                        motionChange = True
                    elif 180 < x < 300:
                        self.callbackKeyLeft = False
                        self.callbackKeyRight = False
                        motionChange = True
                        if j > 140:
                            self.callbackKeyDown = True
                            motionChange = True
                        elif 0 < j < 110:
                            self.callbackKeyUp = True
                            motionChange = True
                        elif 110 < j < 140:
                            self.callbackKeyDown = False
                            self.callbackKeyUp = False
                            motionChange = True

                if motionChange:
                    velocity = 0
                    velocity += VELOCITYCHANGE if self.callbackKeyUp is True else 0
                    velocity -= VELOCITYCHANGE if self.callbackKeyDown is True else 0
                    rotation = 0
                    rotation += ROTATIONCHANGE if self.callbackKeyLeft is True else 0
                    rotation -= ROTATIONCHANGE if self.callbackKeyRight is True else 0

                    # compute left and right wheel velocities
                    vr = velocity + (rotation / 2)
                    vl = velocity - (rotation / 2)

                    # create drive command
                    cmd = struct.pack(">Bhh", 145, vr, vl)
                    if cmd != self.callbackKeyLastDriveCommand:
                        self.sendCommandRaw(cmd)
                        self.callbackKeyLastDriveCommand = cmd
            c = c + 1
            # win.clear_overlay()
            # win.set_image(img)
            # win.add_overlay(dets)

        cap.release()
Example #17
def display_image(img, shape):
    win = dlib.image_window()
    win.clear_overlay()
    win.set_image(img)
    if shape:
        win.add_overlay(shape)
Example #18
 def view_object_detector(self):
     detector = dlib.simple_object_detector(DETECTOR_SVM)
     win_det = dlib.image_window()
     win_det.set_image(detector)
     dlib.hit_enter_to_continue()
Example #19
def search(img_path, labels_list, threshold=0.5, answer_pic=False):
    # if state not in {'recognition', 'search'}:
    #     raise ValueError('{} not valid'.format(state))
    # candidate face folder
    # face_folder_path
    # if state == 'recognition':
    # 1. Load the frontal face detector
    detector = dlib.get_frontal_face_detector()

    # 2. Load the facial landmark predictor shape_predictor_68_face_landmarks.dat
    sp = dlib.shape_predictor(
        r"..\resources\shape_predictor_68_face_landmarks.dat")

    # 3. Load the face recognition model
    facerec = dlib.face_recognition_model_v1(
        r"..\resources\dlib_face_recognition_resnet_model_v1.dat")

    # image_data = io.imread(image_path)

    win = dlib.image_window()

    descriptors = []

    # Only jpg files are accepted
    for f in labels_list:
        path = '../default_search_labels/{}.jpg'.format(f)
        # print("Processing file:{}".format(path))
        # Load the label image
        try:
            img = io.imread(path)
        except FileNotFoundError as e:
            print('Label image is not jpg, falling back to png')
            path = '../default_search_labels/{}.png'.format(f)
            img = io.imread(path)

        # The 1 in the second argument indicates that we should upsample the image
        # 1 time.  This will make everything bigger and allow us to detect more
        # faces.

        # Detect the faces
        dets = detector(img, 1)
        print("Number of faces detected: {}".format(len(dets)))

        for k, d in enumerate(dets):
            # landmark predictor sp
            shape = sp(img, d)
            # Draw the face region and landmarks
            win.clear_overlay()
            win.add_overlay(d)
            win.add_overlay(shape)
            # 3. Descriptor extraction: a 128D vector
            face_descriptor = facerec.compute_face_descriptor(img, shape)
            # Convert to a numpy array
            v = numpy.array(face_descriptor)
            descriptors.append(v)
            # print(descriptors)

    # Load the image to test
    img = io.imread(img_path)
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    # Build one distance list for each of the len(dets) faces
    dist = []
    for k, d in enumerate(dets):
        dist_all = []
        shape = sp(img, d)

        face_descriptor = facerec.compute_face_descriptor(img, shape)
        d_test = numpy.array(face_descriptor)
        # Compute the Euclidean distance to each label descriptor
        for i in descriptors:
            dist_ = numpy.linalg.norm(i - d_test)
            dist_all.append(dist_)  # comparison of face k against label i
        dist.append(dist_all)

    print(dist)

    # Label preparation: the person names from the jpg files in the label folder form the label list
    # list_picture_labels = os.listdir(test_folder_path)
    # candidate = []
    # for i in list_picture_labels:
    #     if i[-4:] in ['.jpg', '.JPG', '.PNG', '.png']:
    #         candidate.append(i[:-4])
    #     else:
    #         mb.showwarning('warning', "file invalid!")
    # candidate=['xinyuanjieyi','qiaobenhuannai','shiyuanlimei','fengtimo']

    candidate = labels_list
    c_d = [{}] * len(dets)
    answer = []
    print(dist)
    for i, d in enumerate(dets):
        # c_d[i] maps each candidate label to its distance for face i
        c_d[i] = dict(zip(candidate, dist[i]))

        cd_sorted = sorted(c_d[i].items(), key=lambda d: d[1])
        print(cd_sorted)
        # Report the result:
        # if the distance is below the threshold the face is recognized, otherwise there is no match
        #     print(cd_sorted[0][1])
        #     print(threshold)
        if cd_sorted[0][1] < threshold:
            answer.append(cd_sorted[0][0])  # the matched person's name
            # print(d)
            # print(type(d))
            # Use opencv to draw the face position on the original image
            # and show the result
            if answer_pic:  # produce the annotated result image
                left_top = (d.left(), d.top())
                right_bottom = (d.right(), d.bottom())
                left_top_bottom = (d.left(), d.bottom())  # anchor for the caption text
                cv2.namedWindow("img", 0)
                cv2.rectangle(img, left_top, right_bottom, (0, 255, 0), 2,
                              cv2.LINE_AA)

                # putText arguments: image / text / anchor coordinate / font / font scale / color / thickness
                # putText(img, text, org, fontFace, fontScale, color, thickness=None, lineType=None,
                #         bottomLeftOrigin=None):
                cv2.putText(img, cd_sorted[0][0], left_top_bottom,
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                cv2.imshow("img", cv2.cvtColor(img,
                                               cv2.COLOR_RGB2BGR))  # convert to BGR for display

    # if cd_sorted[0][1] > 0.4:
    #     answer = "Cannot find a person in the label database!"
    # else:
    #     answer = "The person is:"+cd_sorted[0][0]
    #     print("\n The person is:", cd_sorted[0][0])
    # if answer is not None:
    #     answer.show_information()
    # dlib.hit_enter_to_continue()

    if len(answer) == 0:
        answer = "No Match"
    else:
        if len(answer) == 1:
            answer = '1 person found: ' + ''.join(answer)
        else:
            answer = str(len(answer)) + ' persons found: ' + ','.join(answer)
    print(answer)
    return answer
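A hypothetical call, assuming alice.jpg and bob.jpg exist under ../default_search_labels:

result = search("../pics/group.jpg", ["alice", "bob"], threshold=0.5, answer_pic=True)
print(result)  # e.g. "1 person found: alice" or "No Match"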
Example #20
 def show_picture_and_shape(self):
     """Shows the image and the face shape on the screen."""
     win = dlib.image_window()
     win.set_image(self.img)
     win.add_overlay(self.__detection_obj)
     win.wait_until_closed()
Example #21
 def __init__(self, act=True):
     self.wi = dlib.image_window() if act else None
     self.framecount = 0
Example #22
if len(sys.argv) != 4:
    print(
        "Call this program like this:\n"
        "   ./face_recognition.py shape_predictor_5_face_landmarks.dat dlib_face_recognition_resnet_model_v1.dat ../examples/faces\n"
        "You can download a trained facial shape predictor and recognition model from:\n"
        "    http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n"
        "    http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2")
    exit()

predictor_path = sys.argv[1]
face_rec_model_path = sys.argv[2]
faces_folder_path = sys.argv[3]

# Load all the models we need: a detector to find the faces
# a shape predictor to find face landmarks so we can precisely localize the face
# and finally the face recognition model.
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)

win = dlib.image_window()

# Now process all the images
for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
    print("Processing file: {}".format(f))
    img = io.imread(f)

    win.clear_overlay()
    win.set_image(img)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
Example #23
import cv2
import numpy as np
import dlib
import pandas
from neural_net_svm import NeuralNetSvm
from sklearn.utils import shuffle
from pathlib import Path
import os
import csv

# to store all the frames with the bounding box over the localized faces
# use HoG to detect faces
find_face = dlib.get_frontal_face_detector()

# display the image with the bounding box
win_show_image = dlib.image_window()

# load pre-trained models from dlib to detect the facial landmarks
shape_predictor = r'E:\Wenger\shape_predictor_68_face_landmarks.dat\shape_predictor_68_face_landmarks.dat'

# create the detector using the above pretrained model
face_pose_detector = dlib.shape_predictor(shape_predictor)

# text file opened to save the landmarks
fp = open("landmarks_curious.txt", "wb")


def localize_face_in_frame(path_in, landmarks=[]):  # to store landmarks (note: a mutable default is shared across calls)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    # landmarks index
Example #24
if len(sys.argv) != 3:
    print(
        "Give the path to the trained shape predictor model file and then the "
        "directory containing the facial images.\n"
        "For example, if you are in the python_examples folder then "
        "execute this program by running:\n"
        "    ./face_landmark_detection.py shape_predictor_68_face_landmarks.dat ../examples/faces\n"
        "You can download a trained facial shape predictor from:\n"
        "    http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2")
    exit()

predictor_path = sys.argv[1]     # takes in a shape predictor as first argument 
faces_folder_path = sys.argv[2]  # takes in the folder containing the .jpg images 

detector = dlib.get_frontal_face_detector() # detects faces; len(dets) gives the number of faces

# From API: This object (shape_predictor()) is a tool that takes in an image region containing some object and outputs 
# a set of point locations that define the pose of the object.
predictor = dlib.shape_predictor(predictor_path) 

win = dlib.image_window()  # GUI object

for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")): # glob allows '*.jpg' patterns to work
    print("Processing file: {}".format(f))
    img = io.imread(f)

    win.clear_overlay()
    win.set_image(img)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets))) # prints the number of faces detected
    for k, d in enumerate(dets):   #for (k = 0; k < dets.length; k ++) where d = dets[k]
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(  # prints the corner coordinates 
Example #25
def draw_rectangle(image, coords):
    win = dlib.image_window()
    win.set_image(image)
    for i, mtr in enumerate(coords):
        win.add_overlay(mtr)
    return win
Example #26
from typing import TypeVar, NewType, Type, Generic#, ClassVar

from typing import Container, Hashable, Iterable, Sized, Callable, Awaitable, AsyncIterable
from typing import Iterator, Sequence, Set, Mapping, MappingView, AsyncIterator#, Coroutine
from typing import Generator, MutableSequence, ByteString, MutableSet, MutableMapping, ItemsView, KeysView, ValuesView

from typing import SupportsInt, SupportsFloat, SupportsAbs, SupportsRound
from typing import Union, Optional, AbstractSet, Reversible
from typing import Any, re, io, AnyStr, Tuple, NamedTuple, List, Dict, DefaultDict 

import dlib
from skimage import io
from PIL import Image
import numpy as np
import cv2

image_file = 'capture.png' # type: str
img = io.imread(image_file) # type: np.ndarray
# http://kivantium.hateblo.jp/entry/2015/07/25/184346
# selective search
rects = [] # type: List[dlib.rectangle]  (candidate boxes with left()/top()/right()/bottom() accessors)
dlib.find_candidate_object_locations(img, rects, min_size=1000)

win = dlib.image_window() # type: dlib.image_window
win.set_image(img)
for k, d in enumerate(rects):
    print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        k, d.left(), d.top(), d.right(), d.bottom()))
    win.add_overlay(d)
dlib.hit_enter_to_continue()
Example #27
arduino = serial.Serial('/dev/ttyACM0', 9600)

#open the webcam
cap = cv2.VideoCapture(0)

#load the trained svm files
detector1 = dlib.fhog_object_detector("dataset_abierto.svm")
detector2 = dlib.fhog_object_detector("dataset_cerrado.svm")
detector3 = dlib.fhog_object_detector("dataset_derecho.svm")
detector4 = dlib.fhog_object_detector("dataset_izquierdo.svm")

detectors = [detector1, detector2, detector3, detector4]

#display the learned HOG features for each detector on screen
win_det = dlib.image_window()
win_det.set_image(detectors[0])

win_det2 = dlib.image_window()
win_det2.set_image(detectors[1])

win_det3 = dlib.image_window()
win_det3.set_image(detectors[2])

win_det4 = dlib.image_window()
win_det4.set_image(detectors[3])

win = dlib.image_window()
action = "nada"
font_color = (0, 0, 0)
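The snippet stops before the detection loop. A minimal sketch of one way to run the four detectors together, reusing cap, detectors and win from above; the loop itself is an assumption, but dlib.fhog_object_detector.run_multiple is dlib's API for evaluating several HOG detectors in one pass:

while True:
    ret, frame = cap.read()
    if not ret:
        break
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # detector_idxs records which of the four detectors produced each box
    boxes, confidences, detector_idxs = dlib.fhog_object_detector.run_multiple(
        detectors, rgb, upsample_num_times=0, adjust_threshold=0.0)
    win.clear_overlay()
    win.set_image(rgb)
    win.add_overlay(boxes)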
Example #28
def DetectFaceInListDlib(frameList,
                         faceDetector=None,
                         skipLength=2,
                         debug=False):
    '''
    Given a frame list, detect (track) the faces
    Returns subimages of faces after normalization and smoothing the enclosing rectangle
    '''
    if faceDetector is None:
        predictorPath = 'coreData/shape_predictor_68_face_landmarks.dat'
        faceDetector = dlib.get_frontal_face_detector()

    if (debug):
        win = dlib.image_window()
        win.clear_overlay()

    faceList = []
    newFrameList = []
    rowList = []
    colList = []
    detsList = []
    smoothRowSize = []
    smoothColSize = []
    winSize = (20 / skipLength)

    for i in range(0, frameList.shape[0], skipLength):
        frame = frameList[i]
        dets = faceDetector(frame, 1)
        dets = list(enumerate(dets))
        if (len(dets) != 1):
            continue
        detsList.append(dets)
        newFrameList.append(frame)
        for k, d in (dets):
            rowList.append(np.abs(d.left() - d.right()))
            colList.append(np.abs(d.top() - d.bottom()))

    for i in range(len(rowList)):
        rowAvg = np.mean(
            rowList[int(max(0, i - winSize)):int(min(len(rowList), i +
                                                     winSize))]) + 6
        colAvg = np.mean(
            colList[int(max(0, i - winSize)):int(min(len(colList), i +
                                                     winSize))]) + 6
        smoothRowSize.append(int(round(rowAvg)))
        smoothColSize.append(int(round(colAvg)))

    for i in range(len(detsList)):
        dets = detsList[i]
        frame = newFrameList[i]
        grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        k, d = dets[0]  # exactly one detection per frame (filtered above)
        rowc = (d.left() + d.right()) / 2
        colc = (d.top() + d.bottom()) / 2
        rows = smoothRowSize[i]
        cols = smoothColSize[i]
        # Forcefully make the enclosing box a square
        rows = max(rows, cols)
        cols = max(rows, cols)

        faceImg = grayFrame[
            int(max(0, colc -
                    (cols / 2))):int(min(frame.shape[0], colc + (cols / 2) +
                                         1)),
            int(max(0, rowc -
                    (rows / 2))):int(min(frame.shape[1], rowc + (rows / 2) +
                                         1))]
        # Illumination normalization (histogram equalization; CLAHE would be an
        # alternative); rectangle sizes were already smoothed above with a
        # running average over the window
        faceImg = cv2.equalizeHist(faceImg)

        if debug:
            win.clear_overlay()
            grayFrame[int(max(0, colc - (
                cols / 2))):int(min(frame.shape[0], colc + (cols / 2) + 1)),
                      int(max(0, rowc - (rows / 2))
                          ):int(min(frame.shape[1], rowc + (rows / 2) +
                                    1))] = faceImg
            win.set_image(grayFrame)
            dlib.hit_enter_to_continue()

        faceImg = cv2.resize(faceImg, (100, 100))
        faceImg = np.array(faceImg)
        faceList.append(faceImg)

    faceList = np.array(faceList)
    print("This video produced", faceList.shape[0], "face_in_list (dlib) entries in total")
    return faceList
Example #29
def train(image_folder, append):

    if append == 0:

        #code to open createXML for the noob user with all the required params

        cmd = 'createXML.exe -c' + image_folder + '/training.xml ' + image_folder
        run(cmd, 5)

        cmd = 'createXML.exe ' + image_folder + '/training.xml'
        run(cmd, 50)

    # <NOTE> <IN PROGRESS> include code to write new XML to the old XML and use the latter for the training

    elif append == 1:

        #code to open createXML for the noob user with all the required params

        cmd = 'createXML.exe -c' + image_folder + '/trainingTemp.xml ' + image_folder
        run(cmd, 5)

        cmd = 'createXML.exe ' + image_folder + '/trainingTemp.xml'
        run(cmd, 50)

        dlib.hit_enter_to_continue()

        # doing all the magic stuff to append the new XML to the old one

        xml1 = image_folder + "/training.xml"
        xml2 = image_folder + "/trainingTemp.xml"

        removeUselessText(xml1)
        removeUselessText(xml2)

        #combineXML(xml1,xml2)
        r = XMLCombiner((xml1, xml2)).combine()

        with open(xml1, "r+") as f:
            f.write(et.tostring(r.getroot()))

        #Convert the XML to better format before saving it for the training as there may be some improper indentation

    # setting option in dlib

    options = dlib.simple_object_detector_training_options()

    # symmetric detector
    options.add_left_right_image_flips = True

    # SVM C parameter. A larger value can lead to overfitting.
    options.C = 2

    # Tell the code how many CPU cores your computer has for the fastest training.
    options.num_threads = 4
    options.be_verbose = True

    training_xml_path = os.path.join(image_folder, "training.xml")
    #testing_xml_path = os.path.join(image_folder, "testing.xml")

    # saving the detector as detector.svm with input as the xml file after doing the training

    dlib.train_simple_object_detector(training_xml_path, "detector.svm",
                                      options)

    # Printing the accuracy with training data

    print("\nTraining accuracy: {}".format(
        dlib.test_simple_object_detector(training_xml_path, "detector.svm")))

    # Doing the detection
    detector = dlib.simple_object_detector("detector.svm")

    # Looking at the HOG filter the machine has learned.
    win_det = dlib.image_window()
    win_det.set_image(detector)
Example #30
def grab_boundingbox_face_test(image_directory):
    '''
    http://dlib.net/face_landmark_detection.py.html where I got this from
    Grabs faces and important features to feed into network from the test images
    :param image_directory: Directory of the images
    :return: bounding boxes of every face and 68 features of every face as well as what faces were not grabbed
    '''
    import dlib
    import glob

    data_directory = get_data_directory()
    predictor_path = data_directory + "/dlib-models/shape_predictor_68_face_landmarks.dat"
    faces_folder_path = image_directory

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)
    win = dlib.image_window()

    shape_list = []
    bounding_face_list = []
    # If it is a 1 in the list then it was excluded and a 0 means included
    # Going to be used when grabbing labels and when figuring out what to bound
    exclusion_list = []

    current_index = 0

    for f in glob.glob(os.path.join(faces_folder_path, "*.pgm")):
        print("Processing file: {}".format(f))
        # TODO: FIX THIS
        img = Image.open(f).convert('L')
        img = np.array(img, dtype=np.uint8)
        print(np.shape(img))
        #img = dlib.load_grayscale_image(f)
        # img = dlib.load_rgb_image(f)
        # Do this now before we try finding a face
        exclusion_list.append(1)

        win.clear_overlay()
        win.set_image(img)

        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(img, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            # Found a face so make this zero
            exclusion_list[current_index] = 0
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))

            bounding_face_list.append(d)

            # Get the landmarks/parts for the face in box d.
            shape = predictor(img, d)
            print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                      shape.part(1)))

            shape_list.append(shape)
            # Draw the face landmarks on the screen.
            win.add_overlay(shape)

        current_index += 1

        win.add_overlay(dets)

    bounding_face_list = np.array(bounding_face_list)
    shape_list = np.array(shape_list)
    # Need to reshape to be (len, 1) not (len,)
    # bounding_face_list = bounding_face_list.reshape(len(bounding_face_list))
    # shape_list = shape_list.reshape(len(shape_list))

    # TODO: Comment these out before turning in
    print(np.shape(bounding_face_list))
    print(np.shape(shape_list))
    print(exclusion_list)
    print(np.shape(bounding_face_list))

    return bounding_face_list, shape_list, exclusion_list
Example #31
def grimace(filename):

	import argparse, sys

	try:
		from FeatureGen import findRatio, generateFeatures
	except ImportError:
		exit()

	try:
		import dlib
		from skimage import io
		import numpy
		import cv2
		import time
		from sklearn.externals import joblib
	except ImportError:
		exit()

	emotions={ 1:"Anger", 2:"Contempt", 3:"Disgust", 4:"Sleep", 5:"Happy", 6:"Sadness", 7:"Cry"}

	def Predict_Emotion(filename):

		try:
			img=io.imread(filename)
			cvimg=cv2.imread(filename)
		except:
			return

		win.clear_overlay()
		win.set_image(img)


		dets=detector(img,1)

		if len(dets)==0:
			print("Unable to find any face.")
			return

		for k,d in enumerate(dets):

			shape=predictor(img,d)
			landmarks=[]
			for i in range(68):
				landmarks.append(shape.part(i).x)
				landmarks.append(shape.part(i).y)
		
	
			landmarks=numpy.array(landmarks)
	
			features=generateFeatures(landmarks)
			features= numpy.asarray(features)

			pca_features=pca.transform(features)

			emo_predicts=classify.predict(pca_features)

			global fin

			fin = emotions[int(emo_predicts[0])]
			print('Grimace:', fin)

			#font = cv2.FONT_HERSHEY_SIMPLEX
			#cv2.putText(cvimg,emotions[int(emo_predicts[0])],(20,20), font, 1,(0,255,255),2)

			win.add_overlay(shape)

		cv2.namedWindow("Output")
		cv2.imshow("Output",cvimg)
		time.sleep(2)
		cv2.destroyAllWindows()

		


	if __name__ == "__main__":


		landmark_path="shape_predictor_68_face_landmarks.dat"

		detector= dlib.get_frontal_face_detector()

		try:
			predictor= dlib.shape_predictor(landmark_path)
		except:
			exit()

		win=dlib.image_window()


		try:
			classify=joblib.load("traindata.pkl")
			pca=joblib.load("pcadata.pkl")
		except:
			exit()

		Predict_Emotion(filename)
Example #32
            histr = cv2.calcHist([img], [i], None, [256], [0, 256])
            plt.plot(histr, color=col)
            plt.xlim([0, 256])

        plt.show()

    def plot_hist(self, hist):
        plt.plot(hist)
        plt.xlim([0, 256])
        plt.show()

if __name__ == '__main__':

    fe = Face_Encoding()
    win1 = dlib.image_window()
    win2 = dlib.image_window()
    win3 = dlib.image_window()
    win4 = dlib.image_window()

    for image_path in paths.list_images(
            r"D:\Tuts\DataScience\Python\Datasets\FGNET\Age_Test\Old"):
        image = dlib.load_rgb_image(image_path)
        gray = dlib.load_grayscale_image(image_path)

        #image = cv2.resize(image, (300, 300))
        #gray = cv2.resize(gray, (300, 300))

        descriptor, image = fe._compute_facenet_embedding_dlib(image=image,
                                                               draw=True)
Example #33
def finding_face_landmark(file_name):
    # You can download the required pre-trained face detection model here:
    # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
    model = "shape_predictor_68_face_landmarks.dat"

    face_detector = dlib.get_frontal_face_detector()
    shape_predictor = dlib.shape_predictor(model)

    image_window = dlib.image_window()
    image = io.imread(file_name)

    detected_faces = face_detector(image, 1)

    print("Found {} faces in the image file {}".format(len(detected_faces),
                                                       file_name))
    if len(detected_faces) != 1:
        print(
            "The photo must contain exactly one face. Please try a different photo."
        )
        return []

    image_window.set_image(image)
    face = detected_faces[0]

    image_window.add_overlay(face)
    landmarks = shape_predictor(image, face)

    leftEye1 = landmarks.part(42)
    rightEye1 = landmarks.part(39)
    nose = landmarks.part(30)
    noseTip = landmarks.part(27)
    mouth = landmarks.part(62)
    noseLeft = landmarks.part(31)
    noseRight = landmarks.part(35)

    right1 = landmarks.part(1)
    left1 = landmarks.part(15)
    right2 = landmarks.part(4)
    left2 = landmarks.part(12)
    right3 = landmarks.part(6)
    left3 = landmarks.part(10)

    leftEye2 = landmarks.part(45)
    rightEye2 = landmarks.part(36)

    # for j in range(1, 68):
    #   pos = pose_landmarks.part(j)
    #   cv2.circle(image, (pos.x, pos.y), 1, (0, 0, 255), -1)

    # cv2.circle(image, (leftEye1.x, leftEye1.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (rightEye1.x, rightEye1.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (nose.x, nose.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (noseTip.x, noseTip.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (mouth.x, mouth.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (noseLeft.x, noseLeft.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (noseRight.x, noseRight.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (right1.x, right1.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (right2.x, right2.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (right3.x, right3.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (left1.x, left1.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (left2.x, left2.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (left3.x, left3.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (leftEye2.x, leftEye2.y), 1, (255, 255, 0), -1)
    # cv2.circle(image, (rightEye2.x, rightEye2.y), 1, (255, 255, 0), -1)

    # cv2.line(image, (leftEye1.x, leftEye1.y), (rightEye1.x, rightEye1.y), (255, 255, 0), 1)
    # cv2.line(image, (leftEye1.x, leftEye1.y), (mouth.x, mouth.y), (255, 255, 0), 1)
    # cv2.line(image, (rightEye1.x, rightEye1.y), (mouth.x, mouth.y), (255, 255, 0), 1)
    # cv2.line(image, (leftEye1.x, leftEye1.y), (nose.x, nose.y), (255, 255, 0), 1)
    # cv2.line(image, (rightEye1.x, rightEye1.y), (nose.x, nose.y), (255, 255, 0), 1)
    # cv2.line(image, (mouth.x, mouth.y), (nose.x, nose.y), (255, 255, 0), 1)
    # cv2.line(image, (noseTip.x, noseTip.y), (nose.x, nose.y), (255, 255, 0), 1)
    # cv2.line(image, (noseLeft.x, noseLeft.y), (noseRight.x, noseRight.y), (255, 255, 0), 1)
    # cv2.line(image, (left1.x, left1.y), (right1.x, right1.y), (255, 255, 0), 1)
    # cv2.line(image, (left2.x, left2.y), (right2.x, right2.y), (255, 255, 0), 1)
    # cv2.line(image, (left3.x, left3.y), (right3.x, right3.y), (255, 255, 0), 1)
    # cv2.line(image, (left1.x, left1.y), (leftEye2.x, leftEye2.y), (255, 255, 0), 1)
    # cv2.line(image, (right1.x, right1.y), (rightEye2.x, rightEye2.y), (255, 255, 0), 1)
    # cv2.line(image, (leftEye1.x, leftEye1.y), (leftEye2.x, leftEye2.y), (255, 255, 0), 1)
    # cv2.line(image, (rightEye1.x, rightEye1.y), (rightEye2.x, rightEye2.y), (255, 255, 0), 1)

    d1 = euclidean_distance(leftEye1, rightEye1)  # distance between the eyes
    d2 = euclidean_distance(
        leftEye1, mouth
    )  # distance between middle of the left eyes and middle point of mouth
    d3 = euclidean_distance(
        rightEye1, mouth
    )  # distance between middle of the right eyes and middle point of mouth
    d4 = euclidean_distance(
        leftEye1, nose
    )  # distance between middle of the left eyes and middle point of nose
    d5 = euclidean_distance(
        rightEye1, nose
    )  # distance between middle of the rigth eyes and middle point of nose
    d6 = euclidean_distance(
        mouth, nose
    )  # distance between middle point of mouth and middle point of nose
    d7 = euclidean_distance(
        noseTip, nose)  # distance of middle point of d1 and middle of nose
    d8 = euclidean_distance(noseLeft, noseRight)  # width of nose
    d9 = euclidean_distance(left1, right1)  # width of face
    d10 = euclidean_distance(left2, right2)  # width of face
    d11 = euclidean_distance(left3, right3)  # width of face
    d12 = euclidean_distance(leftEye1, leftEye2)  # width od left eye
    d13 = euclidean_distance(rightEye1, rightEye2)  # width of right eye
    d14 = euclidean_distance(left1, leftEye2)
    d15 = euclidean_distance(right1, rightEye2)

    features = []

    features.append(d1)
    features.append(d2)
    features.append(d3)
    features.append(d4)
    features.append(d5)
    features.append(d6)
    features.append(d7)
    features.append(d8)
    features.append(d9)
    features.append(d10)
    features.append(d11)
    features.append(d12)
    features.append(d13)
    features.append(d14)
    features.append(d15)

    image_window.add_overlay(landmarks)

    #  cv2.imshow("Output", image)
    #  cv2.waitKey(0)

    dlib.hit_enter_to_continue()
    return features
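The euclidean_distance helper is not shown. A minimal sketch, assuming its arguments are dlib.point landmarks with .x and .y attributes:

import math

def euclidean_distance(p1, p2):
    # Plain Euclidean distance between two dlib.point landmarks.
    return math.hypot(p1.x - p2.x, p1.y - p2.y)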
Example #34
print("")  # Print blank line to create gap from previous output
print("Training accuracy: {}".format(
    dlib.test_simple_object_detector(training_xml_path, "detector.svm")))
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on.  The next line does this.  Happily, we
# see that the object detector works perfectly on the testing images.
print("Testing accuracy: {}".format(
    dlib.test_simple_object_detector(testing_xml_path, "detector.svm")))

# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector("detector_eye.svm")  # note: a different file than the detector.svm tested above
#detector = dlib.get_frontal_face_detector()

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
win_det = dlib.image_window()
win_det.set_image(detector)

## Now let's run the detector over the images in the faces folder and display the
## results.
print("Showing detections on the images in the faces folder...")
win = dlib.image_window()
for f in glob.glob(os.path.join(faces_folder, "BioID_0102.jpg")):
    print("Processing file: {}".format(f))
    img = io.imread(f)
    dets = detector(img)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))
Example #35
 def __init__(self):
     self.detector = dlib.get_frontal_face_detector()
     self.win = dlib.image_window()
Example #36
def main():
    #Defining the video capture object
    video_capture = cv2.VideoCapture(1)
    thresh = 0.25
    frame_check = 15
    detect = dlib.get_frontal_face_detector()
    predict = dlib.shape_predictor("/home/agopinath1996/git_ws/deepgaze/scripts/shape_predictor_68_face_landmarks.dat")# change to path where landmark points are stored

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["left_eye"]   # get the left eye index
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"]  # get the right eye index
    flag=0

    sess = tf.Session()
    my_head_pose_estimator = CnnHeadPoseEstimator(sess)
    my_head_pose_estimator.load_roll_variables(os.path.realpath("/home/agopinath1996/git_ws/deepgaze/etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf"))# change to deepgaze directory path
    my_head_pose_estimator.load_pitch_variables(os.path.realpath("/home/agopinath1996/git_ws/deepgaze/etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf"))
    my_head_pose_estimator.load_yaw_variables(os.path.realpath("/home/agopinath1996/git_ws/deepgaze/etc/tensorflow/head_pose/yaw/cnn_cccdd_30k.tf"))


    #Start of Eye Gaze Tracking
    win = dlib.image_window()
    
    predictor_path = "/home/agopinath1996/git_ws/deepgaze/scripts/shape_predictor_68_face_landmarks.dat"
    roi = []
    ref_point = 0 
    index1 = 0
    pt_lefteye_corner_x= 0
    pt_lefteye_corner_y = 0
    pt_pos1 = 0
    predictor = dlib.shape_predictor(predictor_path)
    pt_x2 =0
    pt_y2 = 0
    pt_x1 = 0
    pt_y1 = 0
    pt_actualx = 0
    pt_actualy = 0
    detector = dlib.get_frontal_face_detector()
    flag = 0
    flag1 = 0
    pt_righteye_corner_x = 0
    pt_righteye_corner_y = 0


    if(video_capture.isOpened() == False):
        print("Error: the resource is busy or unavailable")
    else:
        print("The video source has been opened correctly...")

    #Create the main window and move it
    cv2.namedWindow('Video')
    cv2.moveWindow('Video', 20, 20)

    #Obtaining the CAM dimension
    cam_w = int(video_capture.get(3))
    cam_h = int(video_capture.get(4))

    #Defining the camera matrix.
    #To have better result it is necessary to find the focal
    # lenght of the camera. fx/fy are the focal lengths (in pixels)
    # and cx/cy are the optical centres. These values can be obtained
    # roughly by approximation, for example in a 640x480 camera:
    # cx = 640/2 = 320
    # cy = 480/2 = 240
    # fx = fy = cx/tan(60/2 * pi / 180) = 554.26
    c_x = cam_w / 2
    c_y = cam_h / 2
    f_x = c_x / numpy.tan(60/2 * numpy.pi / 180)
    f_y = f_x
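    # Worked example of the formula above: for a 640x480 camera,
    # c_x = 320 and f_x = 320 / tan(30 * pi / 180) = 320 / 0.5774 ≈ 554.26 px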

    #Estimated camera matrix values.
    camera_matrix = numpy.float32([[f_x, 0.0, c_x],
                                   [0.0, f_y, c_y],
                                   [0.0, 0.0, 1.0] ])

    print("Estimated camera matrix: \n" + str(camera_matrix) + "\n")

    #These are the camera matrix values estimated on my webcam with
    # the calibration code (see: src/calibration):
    camera_matrix = numpy.float32([[602.10618226,          0.0, 320.27333589],
                                   [         0.0, 603.55869786,  229.7537026],
                                   [         0.0,          0.0,          1.0] ])

    #Distortion coefficients
    #camera_distortion = numpy.float32([0.0, 0.0, 0.0, 0.0, 0.0])

    #Distortion coefficients estimated by calibration
    camera_distortion = numpy.float32([ 0.06232237, -0.41559805,  0.00125389, -0.00402566,  0.04879263])


    #This matrix contains the 3D points of the
    # 11 landmarks we want to find. It has been
    # obtained from antrophometric measurement
    # on the human head.
    landmarks_3D = numpy.float32([P3D_RIGHT_SIDE,
                                  P3D_GONION_RIGHT,
                                  P3D_MENTON,
                                  P3D_GONION_LEFT,
                                  P3D_LEFT_SIDE,
                                  P3D_FRONTAL_BREADTH_RIGHT,
                                  P3D_FRONTAL_BREADTH_LEFT,
                                  P3D_SELLION,
                                  P3D_NOSE,
                                  P3D_SUB_NOSE,
                                  P3D_RIGHT_EYE,
                                  P3D_RIGHT_TEAR,
                                  P3D_LEFT_TEAR,
                                  P3D_LEFT_EYE,
                                  P3D_STOMION])

    #Declaring the two classifiers
    my_cascade = haarCascade("/home/agopinath1996/git_ws/deepgaze/etc/xml/haarcascade_frontalface_alt.xml", "/home/agopinath1996/git_ws/deepgaze/etc/xml/haarcascade_profileface.xml")
    #TODO If missing, example file can be retrieved from http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
    my_detector = faceLandmarkDetection('/home/agopinath1996/git_ws/deepgaze/scripts/shape_predictor_68_face_landmarks.dat')





    #Error counter definition
    no_face_counter = 0

    #Variables that identify the face
    #position in the main frame.
    face_x1 = 0
    face_y1 = 0
    face_x2 = 0
    face_y2 = 0
    face_w = 0
    face_h = 0

    #Variables that identify the ROI
    #position in the main frame.
    roi_x1 = 0
    roi_y1 = 0
    roi_x2 = cam_w
    roi_y2 = cam_h
    roi_w = cam_w
    roi_h = cam_h
    roi_resize_w = int(cam_w/10)
    roi_resize_h = int(cam_h/10)

    while(True):

        # Capture frame-by-frame
        ret, frame = video_capture.read()
        ret, frame_eye = video_capture.read()

        #print("Estimated [roll, pitch, yaw] ..... [" + str(roll[0,0,0]) + "," + str(pitch[0,0,0]) + "," + str(yaw[0,0,0])  + "]")
        gray = cv2.cvtColor(frame[roi_y1:roi_y2, roi_x1:roi_x2], cv2.COLOR_BGR2GRAY)

        img = cv2.cvtColor(frame_eye, cv2.COLOR_RGB2BGR) #for eye gaze detection
        drowsyframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        subjects = detect(drowsyframe, 0)
        for subject in subjects:
            shape = predict(frame,subject)
            shape = face_utils.shape_to_np(shape)
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            ear = (leftEAR + rightEAR) / 2.0
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0,255,0),1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0,255,0), 1)
            if ear < thresh:
                flag += 1
                print(flag)
                if flag >= frame_check:
                    cv2.putText(frame, "WAKEUPPPP", (10,30), cv2.FONT_HERSHEY_PLAIN, 1.6, (10,10,255), 2)
                    cv2.putText(frame, "WAKEUPPPP", (10, 325), cv2.FONT_HERSHEY_PLAIN, 1.6, (10,10,255),2)
            else:
                flag=0

        #Eye gaze detection
        dets = detect(img, 0)
        check = 5
        shapes_eye = []
        for k,d in enumerate(dets):
            #print("dets{}".format(d))
            #print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(k, d.left(), d.top(), d.right(), d.bottom()))

            shape_eye = predict(img, d)
            shapes_eye.append(shape_eye) #collect for the dlib window overlay below

            for index, pt in enumerate(shape_eye.parts()):
                #print('Part {}: {}'.format(index, pt))
                pt_pos = (pt.x, pt.y)
                cv2.circle(img, pt_pos, 1, (0,225, 0), 2)
                if index == 29:
                    pt_x2 = int(pt.x)
                    pt_y2 = int(pt.y)
                if index == 18:
                    pt_x1 = int(pt.x)
                    pt_y1 = int(pt.y)
                if index == 37:
                    pt_righteye_corner_x = pt.x
                    pt_righteye_corner_y = pt.y
                if index == 40:
                    pt_lefteye_corner_x = pt.x
                    pt_lefteye_corner_y = pt.y
                # Process the eye region once per face, after all the landmarks
                # this block depends on (18, 29, 37 and 40) have been stored.
                if index == 40:
                    roi = frame_eye[pt_y1:pt_y2, pt_x1:pt_x2]
                    roi_gray = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)
                    _, threshold = cv2.threshold(roi_gray, 30, 255, cv2.THRESH_BINARY_INV)
                    M = cv2.moments(threshold)
                    if M["m00"] != 0:
                        #Centroid of the thresholded (dark) pupil blob
                        cX = int(M["m10"] / M["m00"])
                        cY = int(M["m01"] / M["m00"])
                        pt_actualx = pt_x1 + cX
                        pt_actualy = pt_y1 + cY
                        diff_right = pt_actualx - pt_righteye_corner_x
                        diff_left = pt_lefteye_corner_x - pt_actualx
                        print(diff_right, diff_left)
                        if diff_right < 3 or diff_left < 3:
                            cv2.putText(frame, 'Look straight!', (10,60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (10,10,255), 2)
                        cv2.circle(frame, (pt_actualx, pt_actualy), 2, (255,0,255), -1)


            #print(pt_x1, pt_x2, pt_y1, pt_y2)
            #print(roi.shape)
            #print(img.shape)
            try:
                cv2.imshow("threshold", threshold)
                cv2.waitKey(1)
            except:
                pass

        win.clear_overlay()
        win.set_image(img)
        if len(shapes_eye) != 0:
            for i in range(len(shapes_eye)):
                win.add_overlay(shapes_eye[i])





        #Looking for faces with the cascade.
        #The classifier moves over the ROI,
        #starting from a minimum dimension and augmenting
        #slightly based on the scale factor parameter.
        #The scale factor for the frontal face is 1.10 (10%).
        #Scale factor: 1.15=15%, 1.25=25%, etc.
        #Higher scale factors mean faster classification
        #but lower accuracy.
        #
        #Return code: 1=Frontal, 2=FrontRotLeft,
        # 3=FrontRotRight, 4=ProfileLeft, 5=ProfileRight.
        my_cascade.findFace(gray, True, True, True, True, 1.10, 1.10, 1.15, 1.15, 40, 40, rotationAngleCCW=30, rotationAngleCW=-30, lastFaceType=my_cascade.face_type)
        #print(returnvalue)
        #Accumulate error values in a counter
        if(my_cascade.face_type == 0):
            no_face_counter += 1

        #If no face is found for a certain
        #number of cycles, then the ROI is reset
        if(no_face_counter == 50):
            no_face_counter = 0
            roi_x1 = 0
            roi_y1 = 0
            roi_x2 = cam_w
            roi_y2 = cam_h
            roi_w = cam_w
            roi_h = cam_h

        #Checking which kind of face was returned
        if(my_cascade.face_type > 0):

            #Face found, reset the error counter
            no_face_counter = 0

            #Because the dlib landmark detector wants a precise
            #boundary box of the face, it is necessary to resize
            #the box returned by the OpenCV haar detector.
            #Adjusting the frame for profile left
            if(my_cascade.face_type == 4):
                face_margin_x1 = 20 - 10 #resize_rate + shift_rate
                face_margin_y1 = 20 + 5 #resize_rate + shift_rate
                face_margin_x2 = -20 - 10 #resize_rate + shift_rate
                face_margin_y2 = -20 + 5 #resize_rate + shift_rate
                face_margin_h = -0.7 #resize_factor
                face_margin_w = -0.7 #resize_factor
            #Adjusting the frame for profile right
            elif(my_cascade.face_type == 5):
                face_margin_x1 = 20 + 10
                face_margin_y1 = 20 + 5
                face_margin_x2 = -20 + 10
                face_margin_y2 = -20 + 5
                face_margin_h = -0.7
                face_margin_w = -0.7
            #No adjustments
            else:
                face_margin_x1 = 0
                face_margin_y1 = 0
                face_margin_x2 = 0
                face_margin_y2 = 0
                face_margin_h = 0
                face_margin_w = 0

            #Updating the face position
            face_x1 = my_cascade.face_x + roi_x1 + face_margin_x1
            face_y1 = my_cascade.face_y + roi_y1 + face_margin_y1
            face_x2 = my_cascade.face_x + my_cascade.face_w + roi_x1 + face_margin_x2
            face_y2 = my_cascade.face_y + my_cascade.face_h + roi_y1 + face_margin_y2
            face_w = my_cascade.face_w + int(my_cascade.face_w * face_margin_w)
            face_h = my_cascade.face_h + int(my_cascade.face_h * face_margin_h)

            crop_img = frame[face_y1:face_y2, face_x1:face_x2]
            cv2.imshow("cropped", crop_img)

            roll = my_head_pose_estimator.return_roll(crop_img)
            pitch = my_head_pose_estimator.return_pitch(crop_img)
            yaw = my_head_pose_estimator.return_yaw(crop_img)
            #print("Estimated [roll, pitch, yaw] ..... [" + str(roll[0,0,0]) + "," + str(pitch[0,0,0]) + "," + str(yaw[0,0,0])  + "]")

            if yaw[0,0,0] > 30:
                cv2.putText(frame, "You are facing right!", (10,30), cv2.FONT_HERSHEY_PLAIN, 1.6, (10,10,255), 2)
            if yaw[0,0,0] < -30:
                cv2.putText(frame, "You are facing left!", (10,30), cv2.FONT_HERSHEY_PLAIN, 1.6, (10,10,255), 2)








            #Updating the ROI position
            roi_x1 = face_x1 - roi_resize_w
            if (roi_x1 < 0): roi_x1 = 0
            roi_y1 = face_y1 - roi_resize_h
            if(roi_y1 < 0): roi_y1 = 0
            roi_w = face_w + roi_resize_w + roi_resize_w
            if(roi_w > cam_w): roi_w = cam_w
            roi_h = face_h + roi_resize_h + roi_resize_h
            if(roi_h > cam_h): roi_h = cam_h
            roi_x2 = face_x2 + roi_resize_w
            if (roi_x2 > cam_w): roi_x2 = cam_w
            roi_y2 = face_y2 + roi_resize_h
            if(roi_y2 > cam_h): roi_y2 = cam_h

            #Debugging printing utilities
            if(DEBUG == True):
                #print("FACE: ", face_x1, face_y1, face_x2, face_y2, face_w, face_h)
                #print("ROI: ", roi_x1, roi_y1, roi_x2, roi_y2, roi_w, roi_h)

                #Drawing a green rectangle
                # (and text) around the face.
                text_x1 = face_x1
                text_y1 = face_y1 - 3
                if(text_y1 < 0): text_y1 = 0
                cv2.putText(frame, "FACE", (text_x1,text_y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1);
                cv2.rectangle(frame,
                             (face_x1, face_y1),
                             (face_x2, face_y2),
                             (0, 255, 0),
                              2)

            #In case of a frontal/rotated face,
            # the landmark detector is called
            if(my_cascade.face_type > 0):
                landmarks_2D = my_detector.returnLandmarks(frame, face_x1, face_y1, face_x2, face_y2, points_to_return=TRACKED_POINTS)

                if(DEBUG == True):
                    #cv2.drawKeypoints(frame, landmarks_2D)

                    for point in landmarks_2D:
                        cv2.circle(frame,( point[0], point[1] ), 2, (0,0,255), -1)


                #Applying the PnP solver to find the 3D pose
                # of the head from the 2D position of the
                # landmarks.
                #retval - bool
                #rvec - Output rotation vector that, together with tvec, brings
                # points from the model coordinate system to the camera coordinate system.
                #tvec - Output translation vector.
                retval, rvec, tvec = cv2.solvePnP(landmarks_3D,
                                                  landmarks_2D,
                                                  camera_matrix, camera_distortion)
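
                #(Optional) If the full head orientation is needed rather
                # than just the projected axes below, rvec can be converted
                # to a 3x3 rotation matrix with Rodrigues' formula:
                #rmat, _ = cv2.Rodrigues(rvec)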

                #Now we project the 3D points into the image plane
                #Creating a 3-axis to be used as reference in the image.
                axis = numpy.float32([[50,0,0],
                                      [0,50,0],
                                      [0,0,50]])
                imgpts, jac = cv2.projectPoints(axis, rvec, tvec, camera_matrix, camera_distortion)

                #Drawing the three axis on the image frame.
                #The opencv colors are defined as BGR colors such as:
                # (a, b, c) >> Blue = a, Green = b and Red = c
                #Our axis/color convention is X=R, Y=G, Z=B
                sellion_xy = (landmarks_2D[7][0], landmarks_2D[7][1])
                cv2.line(frame, sellion_xy, tuple(imgpts[1].ravel()), (0,255,0), 3) #GREEN
                cv2.line(frame, sellion_xy, tuple(imgpts[2].ravel()), (255,0,0), 3) #BLUE
                cv2.line(frame, sellion_xy, tuple(imgpts[0].ravel()), (0,0,255), 3) #RED

        #Drawing a yellow rectangle
        # (and text) around the ROI.
        if(DEBUG == True):
            text_x1 = roi_x1
            text_y1 = roi_y1 - 3
            if(text_y1 < 0): text_y1 = 0
            cv2.putText(frame, "ROI", (text_x1,text_y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1);
            cv2.rectangle(frame,
                         (roi_x1, roi_y1),
                         (roi_x2, roi_y2),
                         (0, 255, 255),
                         2)

        #Showing the frame and waiting
        # for the exit command
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'): break

    #Release the camera
    video_capture.release()
    print("Bye...")
def display_image(img, shape):
    win = dlib.image_window()
    win.clear_overlay()
    win.set_image(img)
    if shape:
        win.add_overlay(shape)
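
# Note: the drowsiness loop above calls eye_aspect_ratio(), detect() and
# predict(), which are defined earlier in the original script. A minimal
# sketch of the usual EAR formula, assuming scipy is available:
from scipy.spatial import distance

def eye_aspect_ratio(eye):
    # Vertical distances between upper and lower eyelid landmarks
    A = distance.euclidean(eye[1], eye[5])
    B = distance.euclidean(eye[2], eye[4])
    # Horizontal distance between the eye corners
    C = distance.euclidean(eye[0], eye[3])
    return (A + B) / (2.0 * C)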
Example #38
    # Open the CSV output file
    csvname = ImageName + '_record.csv'
    csvfile = open(csvname, 'w', newline='')
    # Set up the CSV field names
    dic = ['index', 'coordinate_x', 'coordinate_y']
    writer = csv.DictWriter(csvfile, fieldnames=dic)
    writer.writeheader()

    # Use the get_frontal_face_detector feature extractor
    detector = dlib.get_frontal_face_detector()
    # dlib's 68-point model, using the author's pre-trained shape predictor
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    # Path of the image
    img = io.imread(ImageName + filetype)
    # Create a dlib image window
    win = dlib.image_window()  # A GUI window that can display images on the screen
    win.clear_overlay()  # Remove all overlays from the image_window
    win.set_image(img)  # Load the image into the window
    # Run the face detector (upsampling the image once)
    dets = detector(img, 1)
    print("Number of faces:", len(dets))

    for k, d in enumerate(dets):
        print("Coordinates of face", k + 1, ":", "left:", d.left(), "right:", d.right(),
              "top:", d.top(), "bottom:", d.bottom())

        width = d.right() - d.left()
        height = d.bottom() - d.top()

        print('Face area:', (width * height))
        # Predict with the shape predictor
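        # The example is cut off here. A hedged sketch of what the omitted
        # part most likely does, given the CSV header above (index,
        # coordinate_x, coordinate_y): predict the 68 landmarks, write one
        # row per point, and show the result.
        shape = predictor(img, d)
        for i in range(68):
            writer.writerow({'index': i,
                             'coordinate_x': shape.part(i).x,
                             'coordinate_y': shape.part(i).y})
        win.add_overlay(shape)
    csvfile.close()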
Example #39
def detect(path):

    if len(dataset_matrix) != 0:
        del dataset_matrix[:]

    win = dlib.image_window()
    frame = io.imread(path)
    #video_capture = cv2.VideoCapture(0)  # for webcam
    detector = dlib.get_frontal_face_detector()  # Face detector
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

    #ret, frame = video_capture.read() #for webcam

    win.clear_overlay()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    clahe_image = clahe.apply(gray)

    detections = detector(clahe_image, 1)  # Detect the faces in the image

    print "Number of faces: {}".format(len(detections))

    if len(detections) == 0:
        return None, None

    #lists for x and y coordinates
    xlist = [[] for i in range(len(detections))]
    ylist = [[] for i in range(len(detections))]

    #lists for mean of x and y coordinates --> gets center of the face
    xmean = [[] for i in range(len(detections))]
    ymean = [[] for i in range(len(detections))]

    #matrices of xlist and ylist for each face
    matrix = [[] for i in range(len(detections))]

    #distances of each landmark from the center of the face
    xcenter = [[] for i in range(len(detections))]
    ycenter = [[] for i in range(len(detections))]

    #relative coordinates in range [0,1]
    xnorm = [[] for i in range(len(detections))]
    ynorm = [[] for i in range(len(detections))]

    #lists for relative coordinates of means
    xmean_norm = [[] for i in range(len(detections))]
    ymean_norm = [[] for i in range(len(detections))]

    # lists for relative distances from the center of the face
    xcenter_norm = [[] for i in range(len(detections))]
    ycenter_norm = [[] for i in range(len(detections))]

    #matrices of final results (contains lists: xcenter, ycenter, eucl_distances, angles)
    final = [[] for i in range(len(detections))]

    for k, d in enumerate(detections):  # For each detected face

        shape = predictor(clahe_image, d)
        for i in range(0, 68):  # 68 landmarks
            xlist[k].append(shape.part(i).x)
            ylist[k].append(shape.part(i).y)
            cv2.circle(frame, (shape.part(i).x, shape.part(i).y),
                       1, (255, 0, 0),
                       thickness=2)  # draw red dots

        xmean[k] = numpy.mean(xlist[k])  #mean of x coordinates
        ymean[k] = numpy.mean(ylist[k])  #mean of y coordinates

        matrix[k] = numpy.column_stack((xlist[k], ylist[k]))
        for x, y in matrix[k]:
            cv2.line(frame, (x, y), (int(xmean[k]), int(ymean[k])),
                     (0, 255, 0),
                     thickness=1)  #draw lines

        cv2.circle(frame, (int(xmean[k]), int(ymean[k])),
                   1, (0, 0, 255),
                   thickness=2)  # draw center dot

        xcenter[k] = ([x - xmean[k] for x in xlist[k]])  #distances from the
        ycenter[k] = ([y - ymean[k] for y in ylist[k]])  #center of the face

        win.set_image(frame)
        win.add_overlay(detections)

        #print min(xlist[k]), max(xlist[k]), min(ylist[k]), max(ylist[k])

        for i in xlist[k]:
            xnorm[k].append(
                float((i - min(xlist[k]))) / float(
                    (max(xlist[k]) - min(xlist[k]))))

        for i in ylist[k]:
            ynorm[k].append(
                float((i - min(ylist[k]))) / float(
                    (max(ylist[k]) - min(ylist[k]))))

        #print xnorm[k]
        #print ynorm[k]

        #print xmean
        #print ymean

        xmean_norm[k] = float((xmean[k] - min(xlist[k]))) / float(
            (max(xlist[k]) - min(xlist[k])))
        ymean_norm[k] = float((ymean[k] - min(ylist[k]))) / float(
            (max(ylist[k]) - min(ylist[k])))

        print "Relative means"
        print xmean_norm
        print ymean_norm

        print "Relative distances"
        xcenter_norm[k] = ([x - xmean_norm[k]
                            for x in xnorm[k]])  # relative distances from the
        ycenter_norm[k] = ([y - ymean_norm[k]
                            for y in ynorm[k]])  # center of the face

        print(xcenter_norm)
        print(ycenter_norm)

        for i in range(len(xcenter_norm[k])):
            final[k].append(xcenter_norm[k][i])
            final[k].append(ycenter_norm[k][i])

        #for i in range(len(xcenter_norm[k])):
        #h = numpy.hypot(abs(xcenter_norm[k][i]), abs(ycenter_norm[k][i]))
        #print h
        #final[k].append(h)

        emotion = write_emotion(path)
        #if emotion != -1:
        print(EMOTIONS[emotion])
        final[k].append(emotion)
        #else:
        #print ValueError

        print(final[k])
        dataset_matrix.append(final[k])
        #write_to_csv(final[k])

        print "from detector: {}".format(dataset_matrix)

    print(final)

    return frame, dataset_matrix
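
# Hedged usage sketch: each row detect() appends to dataset_matrix holds the
# 136 relative landmark coordinates followed by an emotion label, so feature
# and label arrays for a classifier can be split off like this (the image
# path is hypothetical):
#   frame, data = detect("dataset/happy_001.jpg")
#   X = [row[:-1] for row in data]  # 68 (x, y) pairs per face
#   y = [row[-1] for row in data]   # emotion label per face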
Example #40
    def train(train, model, test, flips, C, threads, verbose):

        # Now let's do the training.  The train_simple_object_detector() function has a
        # bunch of options, all of which come with reasonable default values.  The next
        # few lines goes over some of these options.
        options = dlib.simple_object_detector_training_options()

        #**********************REMOVE!!!!!!!!!!!!!!!!!******************
        #Since faces are left/right symmetric we can tell the trainer to train a
        # symmetric detector.  This helps it get the most value out of the training
        # data.
        options.add_left_right_image_flips = flips

        # The trainer is a kind of support vector machine and therefore has the usual
        # SVM C parameter.  In general, a bigger C encourages it to fit the training
        # data better but might lead to overfitting.  You must find the best C value
        # empirically by checking how well the trained detector works on a test set of
        # images you haven't trained on.  Don't just leave the value set at 5.  Try a
        # few different C values and see what works best for your data.
        options.C = C
        # Tell the code how many CPU cores your computer has for the fastest training.
        options.num_threads = threads
        options.be_verbose = verbose

        # You just need to put your images into a list. TrainingImages takes a folder
        #and extracts the images and bounding boxes for each image
        trainingSet = TrainingImages(train)
        images = trainingSet.images

        # Then for each image you make a list of rectangles which give the pixel
        # locations of the edges of the boxes.
        # And then you aggregate those lists of boxes into one big list and then call
        # train_simple_object_detector().
        boxes = trainingSet.boxes

        if not images or not boxes:
            print "Training set is empty.  Check input directory for images and proper \
            formating of bounding box files!"

            sys.exit(2)

        testImages = images
        testBoxes = boxes
        if test != train:
            testSet = TrainingImages(test)
            testImages = testSet.images
            testBoxes = testSet.boxes

        count = 1
        width = 0
        height = 0

        ##Calculating boxes, aspect ratios etc.  ***May need to adjust logic
        ##to accommodate ambiguity.
        ##Also saving new masked images to disk to verify the correct
        ##annotations are being detected.
        if not os.path.exists(train + "/masked"):
            os.makedirs(train + "/masked")
        aspRatios = []
        flatARs = []
        dictARs = {}
        for j, i in enumerate(boxes):
            curImageName = trainingSet.imageNames[j]
            print "Image: ", curImageName
            newImage = images[j].copy()
            aRs = []
            for box in i:
                cv2.rectangle(newImage, (box.left(), box.top()),
                              (box.right(), box.bottom()), (255, 255, 255),
                              thickness=-3)  # OpenCV points are (x, y)
                width += box.width()
                height += box.height()
                ar = float(box.width()) / float(box.height())
                aRs.append(ar)
                dictARs[ar] = box
                flatARs.append(ar)
                count += 1
                print "Box:   ", box, "\t Area:  ", box.area(), "\tAR:  ", aRs

            aspRatios.append(aRs)
            print "\nAspect Ratios:  ", aspRatios, "\nDictionary: ", dictARs
            baseName = curImageName.split("/")[-1]
            newImageName = train + "/masked/" + (baseName).replace(
                ".jpg", "-boxes.jpg")
            print "\nSaving:  ", newImageName, "\n"
            cv2.imwrite(newImageName, newImage)

        ##Calculating the mean and standard deviation (May not need mean
        ##not currently using it...)
        aRMean = np.mean(flatARs, 0)
        aRStd = np.std(flatARs, 0)

        print "Aspect Ratio Mean:  ", aRMean, "  Std:  ", aRStd

        target_size = (float(width) / count) * (float(height) / count)
        #Update the sliding window size based on input data
        width, height = PlateDetector.bestWindow(boxes,
                                                 target_size=target_size)
        targetSize = int(width * height)
        targetAr = float(width) / height
        options.detection_window_size = targetSize
        print "New Width: ", width, "\tNew Height", height, "!!!"
        print "Target size:  ", targetSize, "  Target AR:  ", targetAr

        ##Deleting boxes with aspect ratios that are above or below the target
        ##aspect ratio plus one standard deviation.  bestWindow estimates a target
        ##aspect ratio close to (but not exactly) the mean.  This logic was borrowed
        ##from a dlib C++ HOG training example.  Not sure why they didn't just use the
        ##mean, but this seems to work fine.
        for i, imgArs in enumerate(aspRatios):
            for boxArs in imgArs:
                if (boxArs > (targetAr + aRStd)) or (boxArs <
                                                     (targetAr - aRStd)):
                    print "Deleting box ", dictARs[boxArs]
                    boxes[i].remove(dictARs[boxArs])
                    print "New boxes:  ", boxes

        #Train
        detector = dlib.train_simple_object_detector(images, boxes, options)
        # We could save this detector to disk by uncommenting the following.
        detector.save(model)

        # Now let's look at its HOG filter!
        # win_det.set_image(detector)
        # dlib.hit_enter_to_continue()
        win_det = dlib.image_window()
        win_det.set_image(detector)

        # Note that you don't have to use the XML based input to
        # test_simple_object_detector().  If you have already loaded your training
        # images and bounding boxes for the objects then you can call it as shown
        # below.
        print("\nTraining accuracy: {}".format(
            dlib.test_simple_object_detector(testImages, testBoxes, detector)))
Example #41
            users[face_descriptor] = name

p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
                channels=1,
                rate=8000,
                input=True,
                frames_per_buffer=1024)
stream.start_stream()

in_speech_bf = False
decoder.start_utt()
shape = None

cap = cv2.VideoCapture(-1)
win1 = dlib.image_window()

while True:
    voice_command = ""
    ret, frame = cap.read()
    buf = stream.read(1024)
    if buf:
        decoder.process_raw(buf, False, False)
        if decoder.get_in_speech() != in_speech_bf:
            in_speech_bf = decoder.get_in_speech()
            if not in_speech_bf:
                decoder.end_utt()
                try:
                    voice_command = decoder.hyp().hypstr
                except AttributeError:
                    # decoder.hyp() returns None when nothing was recognized
                    voice_command = ""
Example #42
def DetectFaceInListDlib(frameList, faceDetector = None, skipLength = 2, debug = False):
	'''
	Given a frame list, detect (track) the faces
	Returns subimages of faces after normalization and smoothing the enclosing rectangle
	'''
	if faceDetector is None:
		predictorPath = 'coreData/shape_predictor_68_face_landmarks.dat'
		faceDetector = dlib.get_frontal_face_detector()

	if (debug):
		win = dlib.image_window()
		win.clear_overlay()

	faceList = []
	newFrameList = []
	rowList = []
	colList = []
	detsList = []
	smoothRowSize = []
	smoothColSize = []
	winSize = int(20 / skipLength)  # integer window size for list slicing below

	for i in range(0, frameList.shape[0], skipLength):
		frame = frameList[i]
		dets = faceDetector(frame, 1)
		dets = list(enumerate(dets))
		if (len(dets) != 1):
			continue
		detsList.append(dets)
		newFrameList.append(frame)
		for k, d in (dets):
			rowList.append(np.abs(d.left() - d.right()))
			colList.append(np.abs(d.top() - d.bottom()))

	for i in range(len(rowList)):
		rowAvg = np.mean(rowList[max(0,i-winSize):min(len(rowList),i+winSize)]) + 6
		colAvg = np.mean(colList[max(0,i-winSize):min(len(colList),i+winSize)]) + 6
		smoothRowSize.append(int(round(rowAvg)))
		smoothColSize.append(int(round(colAvg)))

	for i in range(len(detsList)):
		dets = detsList[i]
		frame = newFrameList[i]
		grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

		k, d = dets[0]  # exactly one detection was kept per frame
		rowc = (d.left() + d.right()) // 2
		colc = (d.top() + d.bottom()) // 2
		# Smoothed rectangle sizes (running average over neighbouring frames)
		rows = smoothRowSize[i]
		cols = smoothColSize[i]
		# Forcefully make the enclosing box a square
		rows = max(rows, cols)
		cols = max(rows, cols)

		faceImg = grayFrame[max(0, colc - cols//2):min(frame.shape[0], colc + cols//2 + 1), max(0, rowc - rows//2):min(frame.shape[1], rowc + rows//2 + 1)]
		# Illumination normalization via histogram equalization
		faceImg = cv2.equalizeHist(faceImg)

		if debug:
			win.clear_overlay()
			grayFrame[max(0, colc - cols//2):min(frame.shape[0], colc + cols//2 + 1), max(0, rowc - rows//2):min(frame.shape[1], rowc + rows//2 + 1)] = faceImg
			win.set_image(grayFrame)
			dlib.hit_enter_to_continue()
		
		faceImg = cv2.resize(faceImg, (100, 100))
		faceImg = np.array(faceImg)
		faceList.append(faceImg)

	faceList = np.array(faceList)
	return faceList
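
# Hedged usage sketch (the video path is hypothetical): read frames from a
# video with OpenCV into an array, then extract the normalized face crops.
#   import cv2
#   import numpy as np
#   cap = cv2.VideoCapture("interview.mp4")
#   frames = []
#   while True:
#       ret, frame = cap.read()
#       if not ret:
#           break
#       frames.append(frame)
#   faces = DetectFaceInListDlib(np.array(frames), skipLength=2, debug=False)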
Example #43
    def draw_face_landmarks(self,
                            image,
                            dets=None,
                            shapes=[],
                            return_drawn_landmarks=False,
                            draw_type="line"):

        print("\nDrawing face landmarks..\n")

        if not return_drawn_landmarks:
            win = dlib.image_window()
            win.set_image(image)

        if self.sp == self.shape_68_face_landmarks:
            face_landmarks_list = face_utils.FACIAL_LANDMARKS_68_IDXS
        else:
            face_landmarks_list = face_utils.FACIAL_LANDMARKS_5_IDXS

        if image is None:
            print("Please provide an image")
            exit()

        if dets is None:
            dets = self.detect_face(image=image)

        if len(shapes) == 0:
            shapes = self.detect_face_landmarks(image=image, dets=dets)

        # Only draw the landmarks in a dlib window if we are not returning the image
        if not return_drawn_landmarks:
            for shape in shapes:
                win.add_overlay(shape, dlib.rgb_pixel(0, 255, 0))

        # Draw landmarks over the image using opencv line or circle to return the drawn image
        if return_drawn_landmarks:
            for shape in shapes:
                shape = face_utils.shape_to_np(shape)

                # Loop over the face parts individually
                for (name, (i, j)) in face_landmarks_list.items():

                    # Loop over the subset of facial landmarks, drawing the
                    # specific face part

                    px = None
                    py = None

                    for (x, y) in shape[i:j]:

                        if draw_type == "line":
                            if px is None and py is None:
                                px, py = x, y
                            cv2.line(image, (px, py), (x, y), (0, 255, 0), 2)
                            px, py = x, y

                        else:
                            cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

            return image

        else:
            dlib.hit_enter_to_continue()
            return image
Example #44
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on.  The next line does this.  Happily, we
# see that the object detector works perfectly on the testing images.
print("Testing accuracy: {}".format(
    dlib.test_simple_object_detector(testing_xml_path, "detector.svm")))





# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector("detector.svm")

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
win_det = dlib.image_window()
win_det.set_image(detector)

# Now let's run the detector over the images in the faces folder and display the
# results.
print("Showing detections on the images in the faces folder...")
win = dlib.image_window()
for f in glob.glob(os.path.join(faces_folder, "*.jpg")):
    print("Processing file: {}".format(f))
    img = io.imread(f)
    dets = detector(img)
    print("Number of faces detected: {}".format(len(dets)))
    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))
Example #45
def get_triangulation(im,
                      gray_image,
                      a=50,
                      b=55,
                      c=0.15,
                      show=False,
                      randomize=False):
    '''Returns triangulations'''
    # Using canny edge detection.
    #
    # Reference: http://docs.opencv.org/3.1.0/da/d22/tutorial_py_canny.html
    # First argument: Input image
    # Second argument: minVal (argument 'a')
    # Third argument: maxVal (argument 'b')
    #
    # 'minVal' and 'maxVal' are used in the Hysterisis Thresholding step.
    # Any edges with intensity gradient more than maxVal are sure to be edges
    # and those below minVal are sure to be non-edges, so discarded. Those who
    # lie between these two thresholds are classified edges or non-edges based
    # on their connectivity.
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)
    edges = cv2.Canny(gray_image, a, b)
    if show:
        cv2.imshow('Canny', edges)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        win = dlib.image_window()
    # Set number of points for low-poly edge vertices
    num_points = int(np.where(edges)[0].size * c)
    # Return the indices of the elements that are non-zero.
    # 'nonzero' returns a tuple of arrays, one for each dimension of a,
    # containing the indices of the non-zero elements in that dimension.
    # So, r consists of row indices of non-zero elements, and c column indices.
    r, c = np.nonzero(edges)
    # r.shape, here, returns the count of all points that belong to an edge.
    # So 'np.zeros(r.shape)' an array of this size, with all zeros.
    # 'rnd' is thus an array of this size, with all values as 'False'.
    rnd = np.zeros(r.shape) == 1
    # Mark indices from beginning to 'num_points - 1' as True.
    rnd[:num_points] = True
    # Shuffle
    np.random.shuffle(rnd)
    # Randomly select 'num_points' of points from the set of all edge vertices.
    r = r[rnd]
    c = c[rnd]
    # Number of rows and columns in image
    sz = im.shape
    r_max = sz[0]
    c_max = sz[1]
    # Co-ordinates of all randomly chosen points
    pts = np.vstack([r, c]).T
    if randomize:
        rand_offset = 50
        rand_dirs = [(0, rand_offset), (-rand_offset, 0), (0, -rand_offset),
                     (rand_offset, 0)]
        rnd_count = 0
        for point in pts:
            if random.random() < 0.3:
                rnd_count += 1
                rand_dir = random.randint(0, 3)
                point[0] += rand_dirs[rand_dir][0]
                point[1] += rand_dirs[rand_dir][1]
    # Append (0,0) to the vertical stack
    pts = np.vstack([pts, [0, 0]])
    # Append (0,c_max) to the vertical stack
    pts = np.vstack([pts, [0, c_max]])
    # Append (r_max,0) to the vertical stack
    pts = np.vstack([pts, [r_max, 0]])
    # Append (r_max,c_max) to the vertical stack
    pts = np.vstack([pts, [r_max, c_max]])
    # Append some random points to fill empty spaces
    pts = np.vstack([pts, np.random.randint(0, 750, size=(100, 2))])
    # print(len(pts))
    # pts = my_reduce(pts, 5)
    # print(len(pts))
    dets = detector(im, 1)
    # print("Number of faces detected: {}".format(len(dets)))
    if show:
        win.clear_overlay()
        win.set_image(im)
    for k, d in enumerate(dets):
        shape = predictor(im, d)
        for i in range(shape.num_parts):
            pts = np.vstack([pts, [shape.part(i).x, shape.part(i).y]])
        if show:
            win.add_overlay(shape)
    if show:
        win.add_overlay(dets)
        dlib.hit_enter_to_continue()
    # Construct Delaunay Triangulation from these set of points.
    # Reference: https://en.wikipedia.org/wiki/Delaunay_triangulation
    tris = Delaunay(pts, incremental=True)
    # tris_vertices = pts[tris.simplices]
    # for tri in range(tris_vertices.shape[0]):
    #     x_coords = []
    #     y_coords = []
    #     print(tris_vertices[tri])
    #     for coord in range(tris_vertices.shape[1]):
    #         x_coords.append(tris_vertices[tri][coord][0])
    #         y_coords.append(tris_vertices[tri][coord][1])
    # divideHighVariance(tris, im)
    tris.close()
    # exit(0)
    # Return triangulation
    return tris
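
# Hedged usage sketch: draw the returned triangulation over the image with
# OpenCV (variable names are illustrative; the points are stored as
# (row, col), so they are flipped to (x, y) for drawing).
#   tris = get_triangulation(im, gray_image)
#   for simplex in tris.simplices:
#       tri = tris.points[simplex].astype(np.int32)[:, ::-1]
#       cv2.polylines(im, [tri.reshape(-1, 1, 2)], True, (0, 255, 0), 1)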
Example #46
#   running the command:
#       sudo apt-get install libboost-python-dev cmake
#
#   Also note that this example requires scikit-image which can be installed
#   via the command:
#       pip install scikit-image
#   Or downloaded from http://scikit-image.org/download.html. 

import sys

import dlib
from skimage import io


detector = dlib.get_frontal_face_detector()
win = dlib.image_window()

for f in sys.argv[1:]:
    print("Processing file: {}".format(f))
    img = io.imread(f)
    # The 1 in the second argument indicates that we should upsample the image
    # 1 time.  This will make everything bigger and allow us to detect more
    # faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for i, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            i, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
Example #47
40 E0 D0    which is a lot shorter

How do we change the tone of colors?
We just take the RGB data of the image and pass it to a mapping
function which changes the tone of the color as described in that function.

'''
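
# A minimal sketch of the tone-mapping idea described in the docstring:
# build a 256-entry lookup table and apply it to every channel with
# cv2.LUT. The gamma curve here is an assumption; any 0-255 -> 0-255
# mapping function can be substituted.
import numpy as np

def change_tone(rgb_image, gamma=1.5):
    # Lookup table implementing the mapping function
    lut = np.array([((i / 255.0) ** (1.0 / gamma)) * 255 for i in range(256)]).astype("uint8")
    return cv2.LUT(rgb_image, lut)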

file_name = "pic4.jpg"
# Create a HOG face detector using the built-in dlib class
face_detector = dlib.get_frontal_face_detector()
#face_detector= dlib.cnn_face_detection_model_v1("mmod_human_face_detector.dat")
# for side face detection
side_face_cascade = cv2.CascadeClassifier('haarcascade_profileface.xml')

win = dlib.image_window()  # A window used to display the image

# Load the image into an array
image = io.imread(file_name)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Run the HOG face detector on the image data.
# The result will be the bounding boxes of the faces in our image.
detected_faces = face_detector(image, 1)
side_faces = side_face_cascade.detectMultiScale(gray, 1.1, 5)
print("I found {} front faces in the file {}".format(len(detected_faces),
                                                     file_name))

print("I found {}side  faces in the file {}".format(len(side_faces),
                                                    file_name))