def detect_eyes(self, gray_image):
    detected_eyes = self.eye_cascade.detectMultiScale(gray_image)
    eyes = []
    for (x, y, w, h) in detected_eyes:
        eye = EyeObject()
        eye.set_from_points(dlib.point(x, y), dlib.point(x + w, y + h))
        eyes.append(eye)
    return eyes
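For context, a minimal standalone sketch of the underlying OpenCV call (the image path is hypothetical; the cascade file ships with opencv-python, while EyeObject and self.eye_cascade are project-specific):

import cv2

img = cv2.imread('face.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# load one of the eye cascades bundled with opencv-python
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
for (x, y, w, h) in eye_cascade.detectMultiScale(gray):
    print('eye at', (x, y), 'size', (w, h))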
Example #2
    def test_dlib_shape_to_np_array_results(self):
        """Converts a few example dlib shapes (i.e. dlib.full_object_detection objects) to numpy arrays, and checks if
        the result is as expected"""
        dummy_shapes = [
            full_object_detection(rectangle(), [point(0, 0)]),
            full_object_detection(
                rectangle(1, 2, 3, 4),
                [point(1, 2),
                 point(2, 3),
                 point(3, 4),
                 point(4, 1)]),
            full_object_detection(rectangle(50, 100, 150, 200), [
                point(50, 100),
                point(100, 150),
                point(150, 200),
                point(200, 50)
            ])
        ]

        expected_results = [[[0, 0]], [[1, 2], [2, 3], [3, 4], [4, 1]],
                            [[50, 100], [100, 150], [150, 200], [200, 50]]]

        for shape, result in zip(dummy_shapes, expected_results):
            self.assertTrue(
                np.array_equal(utils.dlib_shape_to_np_array(shape), result))
def get_farthest_vertex_from_point(point, dimensions):
    # Start from the (0, 0) corner, then test the remaining three corners
    # of the (0, 0)-(dimensions.x, dimensions.y) box.
    ret_point = dlib.point(0, 0)
    dist = get_euclidian_distance(point, ret_point)
    vertices = [
        dlib.point(0, dimensions.y),
        dlib.point(dimensions.x, 0),
        dlib.point(dimensions.x, dimensions.y)
    ]

    for vertex in vertices:
        new_dist = get_euclidian_distance(point, vertex)
        if new_dist > dist:
            dist = new_dist
            ret_point = vertex
    return ret_point
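This example calls a get_euclidian_distance helper that is not shown; a minimal sketch consistent with how it is used (two dlib.point arguments):

import math

def get_euclidian_distance(p, q):
    # straight-line distance between two dlib.point objects
    return math.hypot(p.x - q.x, p.y - q.y)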
Example #4
    def encode_filter(filter_files):
        images = []
        faces = []

        FACE_ALIGNMENT = FaceAlignment(LandmarksType._2D,
                                       enable_cuda=True,
                                       flip_input=False)
        for i, filter_file in enumerate(filter_files):
            images.append(skimage.io.imread(str(filter_file)))
            faces.append(FACE_ALIGNMENT.get_landmarks(images[i]))
        FACE_ALIGNMENT = None

        face_recognition_model = face_recognition_models.face_recognition_model_location()
        face_encoder = dlib.face_recognition_model_v1(face_recognition_model)
        for i, face in enumerate(faces):
            if face is None:
                print('Warning: {} has no face.'.format(filter_files[i]))
                continue
            if len(face) > 1:
                print('Warning: {} has more than one face.'.format(
                    filter_files[i]))

            parts = []
            for p in face[0]:
                parts.append(dlib.point(int(p[0]), int(p[1])))
            # The original snippet referenced an undefined `rect` here;
            # derive a bounding rectangle from the landmarks instead.
            xs = [p.x for p in parts]
            ys = [p.y for p in parts]
            rect = dlib.rectangle(min(xs), min(ys), max(xs), max(ys))
            raw_landmark_set = dlib.full_object_detection(rect, parts)
            yield numpy.array(
                face_encoder.compute_face_descriptor(images[i],
                                                     raw_landmark_set, 1))
Example #5
    def __init__(self, box, frame):
        """
        Initializes the tracker state for a detected bounding box
        """
        # Person ID
        self.id = 0
        self.height = frame.shape[0]
        self.width = frame.shape[1]
        print(self.id, box)
        # Coordinates of a bounding box
        self.box = box
        if box[0] < 0 or box[1] < 0 or box[2] < 0 or box[3] < 0:
            print('---------------------')
            print('Unexpected box', box)
            print('---------------------')

        # Number of detection matches
        self.hits = 1

        # Number of detection mismatches
        self.misses = 0

        self.tracker = dlib.correlation_tracker()
        # box is assumed to be (x1, y1, x2, y2); the original w/h names
        # were swapped relative to the values they held.
        cent_x = int((box[0] + box[2]) / 2)
        cent_y = int((box[1] + box[3]) / 2)
        box_w = box[2] - box[0]
        box_h = box[3] - box[1]
        rect = dlib.centered_rect(dlib.point(cent_x, cent_y), box_w, box_h)
        self.tracker.start_track(frame, rect)
Example #6
def geometry_msgs_points_to_face_landmarks(geometry_msgs_points):
    points = dlib.points()
    for p in geometry_msgs_points:
        points.append(dlib.point(int(p.x), int(p.y)))
    return points
Example #7
def loadLandmarkFile(fileName):
    with open(fileName, "r") as inputFileTxt:
        lines = inputFileTxt.readlines()
    lastFrameId = -1
    landmarksList = []
    for rawLine in lines:
        line = rawLine.rstrip().split('\t')

        rectStr = line[2].split(':')
        rect = dlib.rectangle(left=int(rectStr[0]),
                              top=int(rectStr[1]),
                              right=int(rectStr[2]),
                              bottom=int(rectStr[3]))

        parts = []
        for j in range(len(line) - 5):
            pointStr = line[j + 3].split(':')
            parts.append(dlib.point(x=int(pointStr[0]), y=int(pointStr[1])))

        shape = dlib.full_object_detection(rect, parts)
        frameId = int(line[0])
        if lastFrameId != frameId:
            shapeList = []
            landmarksList.append(shapeList)
        shapeList.append(shape)
        lastFrameId = frameId

    print("Loaded landmarks for {} faces in {} frames.".format(
        len(lines), len(landmarksList)))
    return landmarksList
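The parser above implies a tab-separated format: frame id, one unused field, the face rectangle as left:top:right:bottom, the landmark points as x:y pairs, and two trailing fields it skips. A hedged writer sketch for that assumed layout (the unused fields' contents are guesses):

def writeLandmarkLine(outFile, frameId, rect, parts):
    fields = [str(frameId), "0",
              "{}:{}:{}:{}".format(rect.left(), rect.top(),
                                   rect.right(), rect.bottom())]
    fields += ["{}:{}".format(p.x, p.y) for p in parts]
    fields += ["0", "0"]  # placeholders for the two fields the parser ignores
    outFile.write("\t".join(fields) + "\n")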
Example #8
 def parse_photos(self, graph, photos):
     for photo in photos:
         if 'tags' not in photo:
             continue
         photo['images'].sort(key=lambda x: x['height']*x['width'])
         image_url = photo['images'][-1]
         width, height = image_url['width'], image_url['height']
         people = [
             (
                 point(int(t['x']*width/100.0),
                       int(t['y']*height/100.0)),
                 t
             )
             for t in photo['tags']['data']
             if t.get('x') and t.get('y')
         ]
         faces = yield find_faces_url(image_url['source'], hash_face=True)
         # go through the faces _we_ found and interpolate those results
         # with the tags from the image
         for face in faces:
             face['tags'] = []
             for p, tag in people:
                 if face['rect'].contains(p):
                     face['tags'].append(tag)
         photo['faces'] = faces
     return photos
Example #9
def test_point_assignment():
    p = point(27, 42)
    p.x = 16
    assert p.x == 16
    assert p.y == 42
    p.y = 31
    assert p.x == 16
    assert p.y == 31
Example #10
 def encode_one_face_local(image, faces):
     for points in faces:
         parts = []
         for p in points:
             parts.append(dlib.point(int(p[0]), int(p[1])))
         # The original snippet referenced an undefined `rect`; derive a
         # bounding rectangle from the landmark points instead
         # (face_encoder comes from the enclosing module).
         xs = [p.x for p in parts]
         ys = [p.y for p in parts]
         rect = dlib.rectangle(min(xs), min(ys), max(xs), max(ys))
         raw_landmark_set = dlib.full_object_detection(rect, parts)
         yield numpy.array(
             face_encoder.compute_face_descriptor(image, raw_landmark_set, 1))
Example #11
def test_point():
    p = point(27, 42)
    assert repr(p) == "point(27, 42)"
    assert str(p) == "(27, 42)"
    assert p.x == 27
    assert p.y == 42
    ser = pickle.dumps(p, 2)
    deser = pickle.loads(ser)
    assert deser.x == p.x
    assert deser.y == p.y
Example #12
    def perform_generic_hough_transform(self, img, record_hit):
        assert img.shape[0] == self.size
        assert img.shape[1] == self.size

        cent = center(get_rect(img))
        even_size = self.size - (self.size % 2)
        sqrt_2 = sqrt(2)
        for r in range(img.shape[0]):
            for c in range(img.shape[1]):
                val = img[r][c]
                if val != 0:
                    x = c - cent.x
                    y = r - cent.y
                    # Draw the curve in Hough space for this image point,
                    # using the normal form x*cos(theta) + y*sin(theta),
                    # scaled and shifted into accumulator coordinates.
                    for t in range(self.size):
                        theta = t * pi / even_size
                        radius = (x * cos(theta) + y * sin(theta)) / sqrt_2 + even_size / 2 + 0.5
                        rr = int(radius)
                        record_hit(point(t, rr), point(c, r), val)
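A hedged driver sketch for the method above (the hough instance and its size attribute are assumptions); it collects votes into a numpy accumulator and reads off the strongest line:

import numpy as np

size = hough.size
img = np.zeros((size, size), dtype=np.uint8)
img[size // 2, :] = 1  # a horizontal row of "on" pixels

accumulator = np.zeros((size, size))

def record_hit(hough_point, img_point, val):
    # hough_point.x is the angle bin, hough_point.y the radius bin
    if 0 <= hough_point.y < size:
        accumulator[hough_point.y, hough_point.x] += val

hough.perform_generic_hough_transform(img, record_hit)
r, t = np.unravel_index(accumulator.argmax(), accumulator.shape)
print('strongest line: angle bin', t, 'radius bin', r)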
Example #13
def test_points():
    ps = points()

    ps.resize(5)
    assert len(ps) == 5
    for i in range(5):
        assert ps[i].x == 0
        assert ps[i].y == 0

    ps.clear()
    assert len(ps) == 0

    ps.extend([point(1, 2), point(3, 4)])
    assert len(ps) == 2

    ser = pickle.dumps(ps, 2)
    deser = pickle.loads(ser)
    assert deser[0].x == 1
    assert deser[0].y == 2
    assert deser[1].x == 3
    assert deser[1].y == 4
Example #14
def substitute_points(dlib_points, mtcnn_points):
    # Replace the face-outline left/right, nose tip, and mouth corners
    # (68-point-model indices 0, 16, 30, 48, 54) with the MTCNN landmarks.
    SUBSTITUTE = [0, 16, 30, 48, 54]

    len_mtcnn = len(mtcnn_points) // 2

    for i in range(len_mtcnn):
        dlib_points[SUBSTITUTE[i]] = dlib.point(
            round(float(mtcnn_points[i])),
            round(float(mtcnn_points[i + len_mtcnn])))

    return dlib_points
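A hedged usage sketch; MTCNN implementations commonly return the five landmarks as ten values, all x coordinates followed by all y coordinates, which is what the len_mtcnn // 2 split above assumes:

import dlib

# 68 placeholder landmarks, standing in for a dlib shape-predictor result
landmarks = dlib.points()
for _ in range(68):
    landmarks.append(dlib.point(0, 0))

# five MTCNN points as [x0..x4, y0..y4], in the order the SUBSTITUTE table expects
mtcnn = [30.0, 70.0, 50.0, 35.0, 65.0, 40.0, 40.0, 60.0, 80.0, 80.0]
updated = substitute_points(landmarks, mtcnn)
print(updated[30])  # nose tip replaced by the MTCNN estimate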
Example #15
def face_encodings(face_image,
                   landmarks,
                   known_face_locations=None,
                   num_jitters=1):
    """
    Given an image, return the 128-dimension face encoding for each face in the image.

    :param face_image: The image that contains one or more faces
    :param known_face_locations: Optional - the bounding boxes of each face if you already know them.
    :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower)
    :return: A list of 128-dimensional face encodings (one for each face in the image)
    """
    if landmarks is None:
        raw_landmarks = _raw_face_landmarks(face_image,
                                            known_face_locations,
                                            model="small")
    else:
        raw_landmarks = [
            dlib.full_object_detection(
                rect=dlib.rectangle(
                    known_face_locations[0][3],   # left
                    known_face_locations[0][0],   # top
                    known_face_locations[0][1],   # right
                    known_face_locations[0][2]),  # bottom
                parts=[
                    dlib.point(int(landmarks[1][0]), int(landmarks[1][1])),
                    dlib.point(int(landmarks[4][0]), int(landmarks[4][1])),
                    dlib.point(int(landmarks[0][0]), int(landmarks[0][1])),
                    dlib.point(int(landmarks[3][0]), int(landmarks[3][1])),
                    dlib.point(int(landmarks[2][0]), int(landmarks[2][1]))
                ])
        ]
    return [
        np.array(
            face_encoder.compute_face_descriptor(face_image, raw_landmark_set,
                                                 num_jitters))
        for raw_landmark_set in raw_landmarks
    ]
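A hedged usage sketch; the indexing above implies known_face_locations entries are (top, right, bottom, left) tuples and landmarks is five (x, y) pairs (face_encoder and _raw_face_landmarks come from the enclosing module, and the image path is hypothetical):

image = dlib.load_rgb_image('face.jpg')
location = [(50, 200, 180, 80)]  # (top, right, bottom, left)
five_points = [(100, 90), (150, 95), (125, 120), (105, 150), (145, 150)]
encodings = face_encodings(image, five_points, known_face_locations=location)
print(encodings[0].shape)  # (128,)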
Example #16
 def usesmallbox(t, b, l, r, scale, scale_top):
     # Re-run the face detector on a padded crop; scaled_image and self.fd
     # come from the enclosing scope.
     h, w, _ = scaled_image.shape
     fh = b - t
     fw = r - l
     t = max(0, t - int(scale_top*fh))
     b = min(h, b + int(scale*fh))
     l = max(0, l - int(scale*fw))
     r = min(w, r + int(scale*fw))
     facebb = self.fd(scaled_image[t:b, l:r, :])
     if len(facebb) >= 1:
         if len(facebb) > 1:
             print("multiple faces detected, choosing one arbitrarily")
         rect = facebb[0]
         return dlib.translate_rect(rect, dlib.point(l, t))
     else:
         return None
def get_face_boxes(rgbImg, prev_bb, bbs_out):
    """A wrapper function for align.getAllFaceBoundingBoxes """
    res = align.getAllFaceBoundingBoxes(rgbImg)
    if not res:
        return

    local_left = res[0].left()
    local_right = res[0].right()
    local_top = res[0].top()
    local_bottom = res[0].bottom()

    padding = SERVER_FACE_SEARCH_PADDING
    anchor_point = dlib.point(
        y=max(prev_bb.top() - int(prev_bb.height() * padding), 0),
        x=max(prev_bb.left() - int(prev_bb.width() * padding), 0))
    # res is guaranteed non-empty here, so append the translated box directly
    bbs_out.append(
        dlib.rectangle(left=local_left + anchor_point.x,
                       right=local_right + anchor_point.x,
                       top=local_top + anchor_point.y,
                       bottom=local_bottom + anchor_point.y))
    return
def FakeHOGModel(_, face):
    return full_object_detection(face, [point(i, i) for i in range(20)])
Example #20
                # crop the face from the image
                faceOfs = (tl[0], tl[1])
                faceImage = gray[tl[1]:br[1], tl[0]:br[0]]

        if faceImage is not None and faceImage.shape[0] > 0 and faceImage.shape[1] > 0:
            # apply detector on gray face image (on the whole image)
            rect = dlib.rectangle(0, 0, faceImage.shape[1], faceImage.shape[0])
            landmarks = landmarkDetector(faceImage, rect)
            if len(landmarks.parts()) == 68:
                for p in landmarks.parts():
                    cv2.circle(frame, (faceOfs[0] + p.x, faceOfs[1] + p.y), 2,
                               (0, 0, 255), -1)
                p = landmarks.parts()[36]  # outer corner of the left eye
                q = landmarks.parts()[45]  # outer corner of the right eye
                v = dlib.point(p.x - q.x, p.y - q.y)  # vector between the eyes
                u = dlib.point(v.y, -v.x)             # its perpendicular
                # head roll: angle of the perpendicular, remapped so that an
                # upright face reads as 0 degrees
                angle = np.arctan2(u.y, u.x)
                angle = 180 * angle / np.pi
                angle -= 90
                angle = -angle
                print('angle', angle)
                # set iCub's body with the body model seen
                head.set((0, angle, 0, 0, 0, 0))

    cv2.imshow('camera', frame)
    if cv2.waitKey(1) == 27:
        break

cv2.destroyAllWindows()

if len(sys.argv) == 4:
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.75)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)

    capture = cv2.VideoCapture(sys.argv[1])

    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(capture.get(cv2.CAP_PROP_FPS))
    dlib_frame_dim = dlib.point(width, height)

    fourcc = cv2.VideoWriter_fourcc(*'MJPG')

    if not os.path.exists(sys.argv[2][0:sys.argv[2].rfind('/')]):
        os.makedirs(sys.argv[2][0:sys.argv[2].rfind('/')])
    out = cv2.VideoWriter(sys.argv[2], fourcc, fps, (width, height))

    frame_count = 0

    if DEBUG:
        debug_file = open(
            os.path.join(sys.argv[2][0:sys.argv[2].rfind('/')], "debug.txt"),
            'a')
    else:
        debug_file = open("empty.txt", 'w')
def get_mean_point(bounding_box):
    x = int(round((bounding_box[0] + bounding_box[2]) / 2))
    y = int(round((bounding_box[1] + bounding_box[3]) / 2))
    return dlib.point(x, y)
Example #23
print(str(path_to_image))
print(detected_faces)
left, top, right, bottom = get_points(detected_faces, 0)
face_box = dlib.rectangle(left, top, right, bottom)
#d = dlib.rectangle(1,2,3,4)
#print(str(path_to_image) + "----" + str(detected_faces[0]) + "----" + str(d))
# Get pose/landmarks of those faces
# Will be used as an input to the function that computes face encodings
# This allows the neural network to be able to produce similar numbers
# for faces of the same people, regardless of camera angle and/or face positioning in the image
shapes_faces = [shape_predictor(image, face_box)]

face_traits = dlib.points()
len_points = len(points) // 2
for i in range(len_points):
    face_traits.append(dlib.point(round(float(points[i])),
                                  round(float(points[i + len_points]))))
shapes_points = [dlib.full_object_detection(face_box, face_traits)]

#if ( str(path_to_image) == "pics/rakoruja.jpg"):
#print("\n\n--------------------------------------------")
#img = cv2.imread(path_to_image)
draw_facial_points(image, shapes_faces[0].parts(), 3)
cv2.imwrite(os.path.join("./", "OUTPUT-COLOURFUL.jpg"), image)
cv2.waitKey(30)

for i in range(len(shapes_faces[0].parts())):
    print(str(i) + " - " + str(shapes_faces[0].parts()[i]))
for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
    print("Processing file: {}".format(f))

    img = dlib.load_rgb_image(f)

    win.clear_overlay()
    win.set_image(img)

    dets = detector(img, 1)

    print("Number of faces detected: {}".format(len(dets)))

    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

        shape = predictor(img, d)

        print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                  shape.part(1)))

        for j in range(68):
            x, y = shape.part(j).x, shape.part(j).y
            win.add_overlay_circle(dlib.point(x, y), 1,
                                   dlib.rgb_pixel(0, 0, 255))

        win.add_overlay(shape)

    win.add_overlay(dets)

    dlib.hit_enter_to_continue()
Example #25
 def array_to_landmarks(arr, rect):
     points = [dlib.point(x, y) for (x, y) in arr]
     lm = dlib.full_object_detection(rect, points)
     return lm
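A hedged usage sketch for the round trip from plain coordinate pairs back to a dlib shape:

arr = [(10, 20), (30, 40), (50, 60)]
rect = dlib.rectangle(0, 0, 100, 100)
lm = array_to_landmarks(arr, rect)
print(lm.num_parts)  # 3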
Example #26
def test_point_init_kwargs():
    p = point(y=27, x=42)
    assert repr(p) == "point(42, 27)"
    assert str(p) == "(42, 27)"
    assert p.x == 42
    assert p.y == 27
Example #27
def test12(image, points):
    size = image.shape
    # 2D image points. If you change the image, you need to change this vector.
    image_points = np.array([
        (points[0].x, points[0].y),  # Nose tip
        (points[1].x, points[1].y),  # Chin
        (points[2].x, points[2].y),  # Left eye left corner
        (points[3].x, points[3].y),  # Right eye right corner
        (points[4].x, points[4].y),  # Left mouth corner
        (points[5].x, points[5].y)   # Right mouth corner
    ], dtype="double")

    # 3D model points.
    model_points = np.array([
        (0.0, 0.0, 0.0),           # Nose tip
        (0.0, -330.0, -65.0),      # Chin
        (-225.0, 170.0, -135.0),   # Left eye left corner
        (225.0, 170.0, -135.0),    # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left mouth corner
        (150.0, -150.0, -125.0)    # Right mouth corner
    ])


    # Camera internals
    focal_length = size[1]
    center = (size[1]/2, size[0]/2)
    camera_matrix = np.array(
        [[focal_length, 0, center[0]],
         [0, focal_length, center[1]],
         [0, 0, 1]], dtype="double")

    #print("Camera Matrix :\n {0}".format(camera_matrix))

    dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
    (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)

    #print("Rotation Vector:\n {0}".format(rotation_vector))
    #print("Translation Vector:\n {0}".format(translation_vector))

    rvec_matrix = cv2.Rodrigues(rotation_vector)[0]
    proj_matrix = np.hstack((rvec_matrix, translation_vector))
    eulerAngles = -cv2.decomposeProjectionMatrix(proj_matrix)[6]

    pitch, yaw, roll = [math.radians(_) for _ in eulerAngles]
    # asin(sin(x)) wraps each angle into the [-90, 90] degree range
    pitch = math.degrees(math.asin(math.sin(pitch)))
    roll = -math.degrees(math.asin(math.sin(roll)))
    yaw = math.degrees(math.asin(math.sin(yaw)))

    # Project a 3D point (0, 0, 1000.0) onto the image plane.
    # We use this to draw a line sticking out of the nose
    (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)

    for p in image_points:
        cv2.circle(image, (int(p[0]), int(p[1])), 3, (0,0,255), -1)

    p1 = dlib.point(int(image_points[0][0]), int(image_points[0][1]))
    p2 = dlib.point(int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))

    line = dlib.line(p1, p2)
    win.add_overlay(line)


    pitch_compliance = 0 if (abs(pitch) > 5) else (-20*(abs(pitch))+100)
    roll_compliance = 0 if (abs(roll) > 8) else (-(100/8)*(abs(roll))+100)
    yaw_compliance = 0 if (abs(yaw) > 5) else ((-20*abs(int(yaw)))+100)
    return int((pitch_compliance+roll_compliance+yaw_compliance)/3)
Example #28
 def topoint(dp):
     return dlib.point(dp.x, dp.y)
Example #29
def pointcloud_to_dlib_parts(pcloud):
    return [dlib.point(int(p[1]), int(p[0])) for p in pcloud.points]
Example #31
def _create_full_object_detection(css, landmark_points):
    points = [dlib.point(*point) for point in landmark_points]
    return dlib.full_object_detection(_css_to_rect(css), points)
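_css_to_rect is not shown here; in the face_recognition library a css tuple is (top, right, bottom, left), so a consistent sketch is:

def _css_to_rect(css):
    # (top, right, bottom, left) -> dlib.rectangle(left, top, right, bottom)
    return dlib.rectangle(css[3], css[0], css[1], css[2])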
Example #32
 def cv_point_2_dlib(self, point):
     """Helper function to convert an OpenCV point to Dlib format (dlib.point)."""
     return dlib.point(point[0], point[1])