Example #1
    def detect_face(self, img, win):
        dets = detector(img, 1)

        if len(dets) == 0:
            raise NoFaces()

        # Keep the detection with the largest bounding-box area.
        largest_det = None
        l_sq = None
        for d in dets:
            win.add_overlay(d)
            sq = (d.right() - d.left()) * (d.bottom() - d.top())
            if largest_det is None or sq > l_sq:
                largest_det = d
                l_sq = sq

        d = largest_det
        print("Detection: Left: {} Top: {} Right: {} Bottom: {}".format(
            d.left(), d.top(), d.right(), d.bottom()))
        # Get the landmarks/parts for the face in box d.
        shape = sp(img, d)
        # Draw the face landmarks on the screen so we can see what face is currently being processed.
        win.add_overlay(shape, color=dlib.rgb_pixel(0, 255, 0))
        face_descriptor = facerec.compute_face_descriptor(img, shape)

        self.faces.append(img)
        self.vecs.append(face_descriptor)
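The method above relies on module-level objects that are not shown: detector, sp, facerec, and the NoFaces exception. A minimal sketch, assuming they were built from dlib's standard published model files (the file names and the exception definition are assumptions, not part of the original snippet):

import dlib


class NoFaces(Exception):
    """Assumed custom exception, raised when no face is found."""


# HOG-based frontal face detector bundled with dlib.
detector = dlib.get_frontal_face_detector()
# 68-point landmark predictor; the model file name is an assumption.
sp = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# ResNet face-embedding model used by compute_face_descriptor;
# the model file name is an assumption.
facerec = dlib.face_recognition_model_v1(
    "dlib_face_recognition_resnet_model_v1.dat")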
Example #2
    def show(self, out):
        if not self.wi:
            return
        wi = self.wi
        t, depthimage, colorimage, shape = out
        wi.set_image(colorimage)
        wi.clear_overlay()
        self.framecount += 1
        if shape is not None:

            def topoint(dp):
                return dlib.point(dp.x, dp.y)

            fobp = dlib.full_object_detection(
                shape.rect,
                [topoint(shape.part(i)) for i in range(shape.num_parts)])
            # Optionally draw a growing subset of the landmarks as circles,
            # one more point per frame (disabled by default).
            draw_points = False
            if draw_points:
                for jj in range(fobp.num_parts):
                    if jj > self.framecount % 68:
                        break
                    p = fobp.part(jj)
                    wi.add_overlay_circle(dlib.dpoint(p.x, p.y),
                                          2,
                                          color=dlib.rgb_pixel(255, 255, 0))
            wi.add_overlay(fobp)
Example #3
import dlib
from skimage import io


def see_coins(img_path, boxes, cols):
    img = io.imread(img_path)
    win = dlib.image_window()
    win.set_image(img)
    # Draw each box in its corresponding colour.
    for box, col in zip(boxes, cols):
        win.add_overlay(box, color=dlib.rgb_pixel(col[0], col[1], col[2]))
    win.wait_until_closed()
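A hypothetical call appended to the example above, assuming boxes is a list of dlib.rectangle detections and cols a parallel list of RGB triples (all values below are illustrative):

boxes = [dlib.rectangle(left=40, top=60, right=120, bottom=140),
         dlib.rectangle(left=200, top=80, right=280, bottom=160)]
cols = [(255, 0, 0), (0, 255, 0)]  # one RGB colour per box
see_coins("coins.jpg", boxes, cols)  # image path is illustrative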
Example #4
from dlib import rgb_pixel


def test_rgb_pixel():
    p = rgb_pixel(0, 50, 100)
    assert p.red == 0
    assert p.green == 50
    assert p.blue == 100
    assert str(p) == "red: 0, green: 50, blue: 100"
    assert repr(p) == "rgb_pixel(0,50,100)"

    p = rgb_pixel(blue=0, red=50, green=100)
    assert p.red == 50
    assert p.green == 100
    assert p.blue == 0
    assert str(p) == "red: 50, green: 100, blue: 0"
    assert repr(p) == "rgb_pixel(50,100,0)"

    p.red = 100
    p.green = 0
    p.blue = 50
    assert p.red == 100
    assert p.green == 0
    assert p.blue == 50
    assert str(p) == "red: 100, green: 0, blue: 50"
    assert repr(p) == "rgb_pixel(100,0,50)"
Example #6
    def detect(self, file_):
        self.file = file_
        for f in self.file:
            print('Processing image {}'.format(f))
            img = dlib.load_rgb_image(f)
            dets = self.detector(img, 1)
            print('Number of faces detected: {}'.format(len(dets)))
            self.win.clear_overlay()
            self.win.set_image(img)
            self.win.set_title('Number of faces detected: {}'.format(
                len(dets)))
            self.win.add_overlay(
                dets,
                color=dlib.rgb_pixel(0, 0, 255),
            )
            dlib.hit_enter_to_continue()
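The detect method assumes the instance already carries self.detector and self.win. A minimal sketch of a hypothetical host class showing one way those attributes could be prepared (the class name and file names are assumptions):

import dlib


class FaceViewer:
    """Hypothetical host class for the detect() method shown above."""

    def __init__(self):
        self.detector = dlib.get_frontal_face_detector()
        self.win = dlib.image_window()

    # ... the detect() method from the example would be pasted here ...

# Usage (illustrative file names):
#     FaceViewer().detect(["face1.jpg", "face2.jpg"])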
Example #7
import sys

import dlib

# 'detectors' is assumed to be a list of previously trained
# dlib.simple_object_detector objects; passing the list to the constructor
# combines them into a single detector (see the sketch after this example).
detector = dlib.simple_object_detector(detectors)
print("upsampling = ", detector.upsampling_amount)
detector.upsampling_amount = 1

#detector.save("combined_det.svm")
#detector = dlib.simple_object_detector('combined_det.svm')

detector_all = dlib.simple_object_detector('detector_all.svm')
#detector_all = dlib.simple_object_detector('detector.svm')
print("upsampling = ", detector.upsampling_amount)

win = dlib.image_window()

for f in sys.argv[1:]:
    print(f)
    img = dlib.load_rgb_image(f)

    dets = detector(img, 2)
    dets_all = detector_all(img, 2)
    #dets, scores, idxs = dlib.simple_object_detector.run_multiple(detectors, img, 2)

    # grow these rectangles slightly so they don't overlap dets in the image_window
    dets_all = [dlib.grow_rect(d, 1) for d in dets_all]

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets_all, dlib.rgb_pixel(0, 255, 0))
    win.add_overlay(dets)

    input("hit enter to continue")
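A short sketch of how the detectors list used above might be assembled from saved .svm files (the file names are assumptions):

import dlib

# Load several individually trained HOG detectors from disk; the combined
# detector is then built by passing this list to dlib.simple_object_detector.
detector_paths = ["det_front.svm", "det_side.svm"]  # illustrative names
detectors = [dlib.simple_object_detector(p) for p in detector_paths]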
Example #8
import dlib
from skimage import io

# 'file_name' is assumed to hold the path of the image to scan.

# Create a HOG face detector using the built-in dlib class
face_detector = dlib.get_frontal_face_detector()

win = dlib.image_window()

# Load the image into an array
image = io.imread(file_name)

# Run the HOG face detector on the image data.
# The result will be the bounding boxes of the faces in our image.
detected_faces = face_detector(image, 1)

print("Found {} faces in the file {}".format(len(detected_faces), file_name))

# Open a window on the desktop showing the image
win.set_image(image)

# Loop through each face we found in the image
for i, face_rect in enumerate(detected_faces):
    # Detected faces are returned as an object with the coordinates
    # of the top, left, right and bottom edges
    print("- Face #{} found at Left: {} Top: {} Right: {} Bottom: {}".format(
        i, face_rect.left(), face_rect.top(), face_rect.right(),
        face_rect.bottom()))

    # Draw a box around each face we found
    win.add_overlay(face_rect, dlib.rgb_pixel(0, 255, 0))

# Wait until the user hits <enter> to close the window
dlib.hit_enter_to_continue()
Example #9
import sys
import cv2
import dlib

file = 'test.mp4'
cap = cv2.VideoCapture(file)
detector = dlib.get_frontal_face_detector()
win = dlib.image_window()
tracker = None
while True:
    ret, frame = cap.read()
    if not ret:
        break
    dets = detector(frame, 1)
    print('Number of faces detected: {}'.format(len(dets)))
    #for i, d in enumerate(dets):
    #   print('Detection {}: Left: {} Top: {} Right: {} Bottom: {}'.format(i, d.left(), d.top(), d.right(), d.bottom()))
    win.clear_overlay()
    win.set_image(frame)
    win.set_title('Number of faces detected: {}'.format(len(dets)))
    win.add_overlay(dets, color=dlib.rgb_pixel(255, 255, 0))
    # Reset the tracker when no face is visible in the current frame.
    if len(dets) == 0:
        tracker = None
    for d in dets:
        if tracker is None:
            tracker = dlib.correlation_tracker()
            tracker.start_track(frame, d)
            win.add_overlay(d)
        else:
            tracker.update(frame)
            pos = tracker.get_position()
            win.add_overlay(pos)
            win.add_overlay(d, color=dlib.rgb_pixel(0, 255, 0))

predictor_path = sys.argv[1]  # assumed: path to the shape predictor model file
image_file = sys.argv[2]

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
win = dlib.image_window()

print("Processing file: {}".format(image_file))
img = dlib.load_rgb_image(image_file)

win.clear_overlay()
win.set_image(img)

dets = detector(img, 2)
print("Number of faces detected: {}".format(len(dets)))
for index, det in enumerate(dets):
    print("Detection {}: LEFT: {}, TOP: {}, RIGHT: {}, BOTTOM: {}".format(index, det.left(), det.top(), det.right(), det.bottom()))
    shape = predictor(img, det)
    
    # when you want to visualize the landmarks divided into groups:
    # 'landmarks' is a list of index groups and 'colors' a parallel list of
    # BGR colours (see the sketch after this example)
    for marks, color in zip(landmarks, colors):
        for i in marks:
            win.add_overlay_circle(shape.part(i), 1, dlib.rgb_pixel(color[2], color[1], color[0]))
    
    # when you just want to see all the landmarks at once
    # win.add_overlay(shape)


win.add_overlay(dets)
dlib.hit_enter_to_continue()
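The script above iterates over landmarks and colors, which are defined elsewhere. A plausible definition, assuming the standard 68-point landmark layout grouped by facial region and one BGR colour per group (both the groupings and the colour values are illustrative):

# Index groups of the 68-point model, one group per facial region.
landmarks = [
    range(0, 17),    # jaw line
    range(17, 22),   # right eyebrow
    range(22, 27),   # left eyebrow
    range(27, 36),   # nose
    range(36, 42),   # right eye
    range(42, 48),   # left eye
    range(48, 68),   # mouth
]
# One BGR colour per group; the loop above converts each to RGB by passing
# color[2], color[1], color[0] to dlib.rgb_pixel.
colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
          (0, 255, 255), (255, 0, 255), (128, 128, 128)]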

for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
    print("Processing file: {}".format(f))

    img = dlib.load_rgb_image(f)

    win.clear_overlay()
    win.set_image(img)

    dets = detector(img, 1)

    print("Number of faces detected: {}".format(len(dets)))

    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

        shape = predictor(img, d)

        print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                  shape.part(1)))

        for j in range(68):
            x, y = shape.part(j).x, shape.part(j).y
            win.add_overlay_circle(dlib.point(x, y), 1,
                                   dlib.rgb_pixel(0, 0, 255))

        win.add_overlay(shape)

    win.add_overlay(dets)

    dlib.hit_enter_to_continue()
Example #13
    def draw_face_landmarks(self,
                            image,
                            dets=None,
                            shapes=None,
                            return_drawn_landmarks=False,
                            draw_type="line"):

        print("\nDrawing face landmarks..\n")

        if image is None:
            print("Please provide an image")
            exit()

        if not return_drawn_landmarks:
            win = dlib.image_window()
            win.set_image(image)

        if self.sp == self.shape_68_face_landmarks:
            face_landmarks_list = face_utils.FACIAL_LANDMARKS_68_IDXS
        else:
            face_landmarks_list = face_utils.FACIAL_LANDMARKS_5_IDXS

        if dets is None:
            dets = self.detect_face(image=image)

        if not shapes:
            shapes = self.detect_face_landmarks(image=image, dets=dets)

        # Only draw the landmarks in a dlib window if we are not returning the image
        if not return_drawn_landmarks:
            for shape in shapes:
                win.add_overlay(shape, dlib.rgb_pixel(0, 255, 0))

        # Draw landmarks over the image using opencv line or circle to return the drawn image
        if return_drawn_landmarks:
            for shape in shapes:
                shape = face_utils.shape_to_np(shape)

                # Loop over the face parts individually
                for (name, (i, j)) in face_landmarks_list.items():

                    # Loop over the subset of facial landmarks, drawing the
                    # specific face part

                    px = None
                    py = None

                    for (x, y) in shape[i:j]:

                        if draw_type == "line":
                            if px is None and py is None:
                                px, py = x, y
                            cv2.line(image, (px, py), (x, y), (0, 255, 0), 2)
                            px, py = x, y

                        else:
                            cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

            return image

        else:
            dlib.hit_enter_to_continue()
            return image
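A hypothetical usage sketch; the wrapper class name and its constructor are assumptions, and only draw_face_landmarks corresponds to the method above:

import cv2
import dlib

pipeline = FaceLandmarkPipeline()  # hypothetical class hosting the method above
image = dlib.load_rgb_image("face.jpg")  # illustrative file name
drawn = pipeline.draw_face_landmarks(image,
                                     return_drawn_landmarks=True,
                                     draw_type="line")
# dlib loads RGB, OpenCV writes BGR, so swap channels before saving.
cv2.imwrite("face_landmarks.jpg", cv2.cvtColor(drawn, cv2.COLOR_RGB2BGR))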
Example #14
diff = round(calculate_diff(normal_image, restored), 3)
print('The restored image differs from the original '
      'by {p}%'.format(p=diff))

# Compute and analyse the anthropometric characteristics of the
# original and reconstructed face images
points, det = find_face_landmarks(normal_image)
points_rest, det_rest = find_face_landmarks(restored)
sum_distance = round(calculate_distance_between_landmarks(points, points_rest),
                     3)
print('Sum of Euclidean distances between the anthropometric landmarks: '
      '{s}'.format(s=sum_distance))
diff = detect_antro_diff(points, points_rest)
columns = [
    'Face height', 'Face width', 'Palpebral fissure width', 'Nose width',
    'Lip width', 'Nose height', 'Lower-to-upper lip height'
]
rows = ['Original', 'Restored', 'Difference', 'Difference %']
print(tabulate(diff, headers=columns, showindex=rows))
# Display the computed anthropometric landmarks on the original face image
# for comparison
# Create a dlib window object
win = dlib.image_window()
win.set_title("Face landmarks")
win.clear_overlay()
win.set_image(normal_image)
# Landmarks of the original image, in blue
win.add_overlay(det)
# Landmarks of the reconstructed face, in red
win.add_overlay(det_rest, color=dlib.rgb_pixel(255, 0, 0))
win.wait_until_closed()
Example #15
                       json.load(f))
        testboxes.append(list(imgboxes))

idx = 0
print("Showing detections on the images in the faces folder...")
win = dlib.image_window()
print("Processing file: {}".format(f))
img = testimages[idx]
dets = detector(img)
# print(dets)
# print(boxes[0])
print("Number of faces detected: {}".format(len(dets)))
win.clear_overlay()
win.set_image(img)
win.add_overlay(dets)
color = dlib.rgb_pixel(0, 255, 0)
win.add_overlay(testboxes[idx], color=color)
dlib.hit_enter_to_continue()


# Next, suppose you have trained multiple detectors and you want to run them
# efficiently as a group.  You can do this as follows:
detector1 = dlib.fhog_object_detector("detector.svm")
# In this example we load detector.svm again since it's the only one we have on
# hand. But in general it would be a different detector.
detector2 = dlib.fhog_object_detector("detector.svm")
# make a list of all the detectors you want to run.  Here we have 2, but you
# could have any number.
detectors = [detector1, detector2]
image = dlib.load_rgb_image(faces_folder + '/2008_002506.jpg')
[boxes, confidences, detector_idxs] = dlib.fhog_object_detector.run_multiple(detectors, image, upsample_num_times=1, adjust_threshold=0.0)
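run_multiple returns, for each winning box, a confidence score and the index of the detector that produced it. A short follow-on sketch, reusing the image_window created earlier in this example, that colour-codes the boxes per detector (the palette is illustrative):

# One overlay colour per detector in the 'detectors' list.
palette = [dlib.rgb_pixel(255, 0, 0), dlib.rgb_pixel(0, 255, 0)]
win.clear_overlay()
win.set_image(image)
for box, confidence, det_idx in zip(boxes, confidences, detector_idxs):
    print("Detector {} found {} with confidence {:.2f}".format(
        det_idx, box, confidence))
    win.add_overlay(box, palette[det_idx])
dlib.hit_enter_to_continue()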