# Import the necessary packages
import argparse
import time

import cv2
import dlib
import imutils
from imutils.video import VideoStream
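
# The snippets on this page read their inputs from an "args" dictionary that
# is never built on-page; a minimal argparse sketch that would produce it
# (the flag names and defaults here are assumptions, not original code):
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str, help="path to the input image")
ap.add_argument("-m", "--model", type=str,
    help="path to dlib's CNN face detector model file")
ap.add_argument("-u", "--upsample", type=int, default=1,
    help="number of times to upsample the image before detection")
args = vars(ap.parse_args())

# Load dlib's CNN face detector model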
print("[INFO] Loading CNN face detection model...")
detector = dlib.cnn_face_detection_model_v1(args["model"])

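# Every example on this page leans on a convert_and_trim_bb helper that is
# never shown. A minimal sketch of what it likely does: convert a dlib
# rectangle into an OpenCV-style (x, y, w, h) tuple, clipped to the image.
def convert_and_trim_bb(image, rect):
    # Extract the starting and ending (x, y)-coordinates of the rectangle
    startX = rect.left()
    startY = rect.top()
    endX = rect.right()
    endY = rect.bottom()
    # Clip the coordinates to the spatial dimensions of the image
    startX = max(0, startX)
    startY = max(0, startY)
    endX = min(endX, image.shape[1])
    endY = min(endY, image.shape[0])
    # Convert to (x, y, width, height)
    w = endX - startX
    h = endY - startY
    return (startX, startY, w, h)
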
# Load the input image from disk, resize it, and then convert it from BGR to
# RGB channel ordering (which is what dlib expects)
image = cv2.imread(args["image"])
image = imutils.resize(image, width=600)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Perform face detection using dlib's face detector
start = time.time()
print("[INFO] Performing face detection with dlib face detector...")
results = detector(rgb, args["upsample"])
end = time.time()
print("[INFO] Face detection took {:.4f} seconds".format(end - start))

# Convert the resulting dlib rectangle objects to bounding boxes, then ensure that the bounding boxes are all within
# the bounds of the input image
boxes = [convert_and_trim_bb(image, r.rect) for r in results]

# Loop over the bounding boxes
for (x, y, w, h) in boxes:
    # Draw the bounding box on the image
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

# Show the output image
cv2.imshow("Output", image)
cv2.waitKey(0)

# Initialize the video stream (a webcam source is assumed here; the original
# excerpt never shows where "vs" comes from) and let the camera sensor warm up
vs = VideoStream(src=0).start()
time.sleep(2.0)

# Loop over the frames from the video stream
while True:
    # Grab the frame from the threaded video stream, resize it to a maximum
    # width of 600 pixels, and then convert it from BGR to RGB channel
    # ordering (which is what dlib expects)
    frame = vs.read()
    frame = imutils.resize(frame, width=600)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Perform face detection using dlib's face detector
    start = time.time()
    print("[INFO] Performing face detection with dlib face detector...")
    results = detector(rgb, args["upsample"])
    end = time.time()
    print("[INFO] Face detection took {:.4f} seconds".format(end - start))
    # Convert the resulting dlib rectangle objects to bounding boxes, then ensure that the bounding boxes are all within
    # the bounds of the input image
    boxes = [convert_and_trim_bb(frame, r.rect) for r in results]
    # Loop over the bounding boxes
    for (x, y, w, h) in boxes:
        # Draw the bounding box on the image
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # Show the output image
    cv2.imshow("Output", frame)
    key = cv2.waitKey(1) & 0xFF
    # If the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# Do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
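
# Note: dlib's CNN detector returns mmod_rectangle objects, which is why the
# loops above read r.rect; each result also carries a confidence score, so
# weak detections could be filtered inside the loop, for example (the 0.5
# threshold is an arbitrary choice, not from the original code):
# boxes = [convert_and_trim_bb(frame, r.rect) for r in results if r.confidence > 0.5]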
Example 3
# Load dlib's HOG + Linear SVM face detector
print("[INFO] Loading HOG + Linear SVM face detector...")
detector = dlib.get_frontal_face_detector()

# Load the input image from disk, resize it and then convert it from BGR to RGB channel ordering
# (which is what dlib expects)
image = cv2.imread(args["image"])
image = imutils.resize(image, width=600)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Perform face detection using dlib's frontal face detector
start = time.time()
print("[INFO] Performing face detection with dlib...")
rects = detector(rgb, args["upsample"])
end = time.time()
print("[INFO] Face detection took {:.4f} seconds".format(end - start))

# Convert the resulting dlib rectangle objects to bounding boxes, then ensure the bounding boxes are all within the
# bounds of the input image
boxes = [convert_and_trim_bb(image, r) for r in rects]

# Loop over the bounding boxes
for (x, y, w, h) in boxes:
    # Draw the bounding box on the image
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

# Show the output image
cv2.imshow("Output", image)
cv2.waitKey(0)
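
# Note: unlike the CNN model, the HOG + Linear SVM detector returns plain
# dlib.rectangle objects, so they go to convert_and_trim_bb without a .rect
# lookup. If detection scores are needed, dlib's run method exposes them:
# rects, scores, idx = detector.run(rgb, args["upsample"])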
Example 4
# This example reuses the HOG + Linear SVM detector from Example 3 and, like
# the CNN video example above, assumes a running (webcam) video stream
detector = dlib.get_frontal_face_detector()
vs = VideoStream(src=0).start()
time.sleep(2.0)

# Loop over the frames from the video stream
while True:
    # Grab the frame from the threaded video stream, resize it to a maximum
    # width of 600 pixels, and then convert it from BGR to RGB channel
    # ordering (which is what dlib expects)
    frame = vs.read()
    frame = imutils.resize(frame, width=600)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Perform face detection using dlib's face detector
    start = time.time()
    print("[INFO] Performing face detection with dlib frontal face detector...")
    rects = detector(rgb, args["upsample"])
    end = time.time()
    print("[INFO] Face detection took {:.4f} seconds".format(end - start))
    # Convert the resulting dlib rectangle objects to bounding boxes, then ensure that the bounding boxes are all within
    # the bounds of the input image
    boxes = [convert_and_trim_bb(frame, r) for r in rects]
    # Loop over the bounding boxes
    for (x, y, w, h) in boxes:
        # Draw the bounding box on the image
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # Show the output image
    cv2.imshow("Output", frame)
    key = cv2.waitKey(1) & 0xFF
    # If the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# Do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
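
# For throughput over a whole stream (rather than per-frame timings), the
# loops above could be instrumented with imutils' FPS counter; a minimal
# sketch, assuming the same VideoStream and detector setup as the examples:
from imutils.video import FPS

vs = VideoStream(src=0).start()
time.sleep(2.0)
fps = FPS().start()
while True:
    # Grab a frame, detect faces, and count the frame toward the FPS estimate
    frame = vs.read()
    frame = imutils.resize(frame, width=600)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    results = detector(rgb, args["upsample"])
    fps.update()
    cv2.imshow("Output", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
fps.stop()
print("[INFO] Elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] Approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
vs.stop()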