Example 1
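Detect faces in the camera stream with FaceDetector, display the annotated robot_view in an OpenCV window, and print each detected face's angle, center and bounding rectangle.
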
from signal import pause

import cv2

from pitop import Camera
from pitop.processing.algorithms.faces import FaceDetector


def find_faces(frame):
    face = face_detector(frame)
    robot_view = face.robot_view

    cv2.imshow("Faces", robot_view)
    cv2.waitKey(1)

    if face.found:
        print(f"Face angle: {face.angle} \n"
              f"Face center: {face.center} \n"
              f"Face rectangle: {face.rectangle} \n")
    else:
        print("Cannot find face!")


# flip_top_bottom=True flips the camera image vertically
camera = Camera(resolution=(640, 480), flip_top_bottom=True)
face_detector = FaceDetector()

# Run find_faces on every new camera frame
camera.on_frame = find_faces

pause()
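
A possible variation (not part of the example above): since face.robot_view is the annotated frame that cv2.imshow already displays, the same callback pattern can save a snapshot to disk whenever a face is found. The filename scheme and the snapshot limit are only for illustration.

from signal import pause

import cv2

from pitop import Camera
from pitop.processing.algorithms.faces import FaceDetector

face_detector = FaceDetector()
snapshot_count = 0
MAX_SNAPSHOTS = 25  # stop writing files after this many frames


def save_face_snapshot(frame):
    global snapshot_count
    face = face_detector(frame)

    if face.found and snapshot_count < MAX_SNAPSHOTS:
        # robot_view is the annotated frame shown by cv2.imshow in the example above
        cv2.imwrite(f"face_{snapshot_count:04d}.png", face.robot_view)
        snapshot_count += 1


camera = Camera(resolution=(640, 480), flip_top_bottom=True)
camera.on_frame = save_face_snapshot

pause()
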
Example 2
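Capture frames in OpenCV format and display them as grayscale, first through the on_frame callback for 60 seconds, then by polling get_frame() in a loop until interrupted.
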
from time import sleep

import cv2

from pitop import Camera

cam = Camera(format="OpenCV")


def show_gray_image(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cv2.imshow("frame", gray)
    cv2.waitKey(1)  # Necessary to show image


# Use callback function for 60s
cam.on_frame = show_gray_image
sleep(60)


# Use get_frame indefinitely
try:
    while True:
        show_gray_image(cam.get_frame())

except KeyboardInterrupt:
    cv2.destroyAllWindows()
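
A follow-up sketch (not from the original example): the same polling loop can record the grayscale stream to disk with OpenCV's VideoWriter. The resolution is taken from the first captured frame; the MJPG codec, the output filename and the 20 fps rate are assumptions, and support for single-channel writing depends on the local OpenCV build.

import cv2

from pitop import Camera

cam = Camera(format="OpenCV")

# Read one frame to discover the resolution
first_frame = cam.get_frame()
height, width = first_frame.shape[:2]

# MJPG-encoded AVI at an assumed 20 fps; the final False means frames are single-channel
writer = cv2.VideoWriter(
    "camera_gray.avi",
    cv2.VideoWriter_fourcc(*"MJPG"),
    20.0,
    (width, height),
    False,
)

try:
    while True:
        gray = cv2.cvtColor(cam.get_frame(), cv2.COLOR_BGR2GRAY)
        writer.write(gray)
except KeyboardInterrupt:
    writer.release()
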
Example 3

Stream the camera feed straight to the pi-top miniscreen by passing each new frame to display_image.

from signal import pause

from pitop import Camera, Pitop

camera = Camera()
pitop = Pitop()
camera.on_frame = pitop.miniscreen.display_image

pause()  # keep the program alive so frames continue to be displayed
Example 4
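Track a detected face with a pan-tilt servo mount: the face center reported by FaceDetector is passed to PanTiltController's track_object on every frame, and tracking stops when no face is found.
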
from signal import pause

import cv2

from pitop import Camera, PanTiltController
from pitop.processing.algorithms.faces import FaceDetector


def track_face(frame):
    face = face_detector(frame)
    robot_view = face.robot_view

    cv2.imshow("Faces", robot_view)
    cv2.waitKey(1)

    if face.found:
        face_center = face.center
        pan_tilt.track_object(face_center)
        print(f"Face center: {face_center}")
    else:
        pan_tilt.track_object.stop()
        print("Cannot find face!")


face_detector = FaceDetector()

pan_tilt = PanTiltController(servo_pan_port="S0", servo_tilt_port="S3")
pan_tilt.tilt_servo.target_angle = 0
pan_tilt.pan_servo.target_angle = 0

camera = Camera(resolution=(640, 480))
camera.on_frame = track_face

pause()
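
An optional hardening sketch (not in the original): wrap the blocking call so that both servos return to their center position when the script is interrupted. It assumes the same S0/S3 servo ports and the track_face callback defined above.

from signal import pause

from pitop import Camera, PanTiltController

pan_tilt = PanTiltController(servo_pan_port="S0", servo_tilt_port="S3")
pan_tilt.tilt_servo.target_angle = 0
pan_tilt.pan_servo.target_angle = 0

camera = Camera(resolution=(640, 480))
# camera.on_frame = track_face  # as defined in the example above

try:
    pause()
finally:
    # Re-center both servos before the program exits
    pan_tilt.pan_servo.target_angle = 0
    pan_tilt.tilt_servo.target_angle = 0
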
Example 5
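Classify the emotion of the detected face with EmotionClassifier and print it as an ASCII emoticon, overwriting the same console line on each frame.
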
from signal import pause

import cv2

from pitop import Camera
from pitop.processing.algorithms.faces import EmotionClassifier, FaceDetector


def detect_emotion(frame):
    face = face_detector(frame)
    emotion = emotion_classifier(face)

    if emotion.found:
        print(f"{emotion_lookup[emotion.type]}", end="\r", flush=True)
    else:
        print("Face not found!")

    cv2.imshow("Emotion", emotion.robot_view)
    cv2.waitKey(1)


camera = Camera(resolution=(640, 480), flip_top_bottom=True)

face_detector = FaceDetector()
emotion_classifier = EmotionClassifier()
emotion_types = emotion_classifier.emotion_types
ascii_emotions = [":|", ":c", "D:<", ":)", ":(", ":O"]
# Map each emotion type to its ASCII emoticon
emotion_lookup = dict(zip(emotion_types, ascii_emotions))

camera.on_frame = detect_emotion

pause()
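
An illustrative follow-up (not part of the original example): the same callback structure can keep a running tally of the emotions seen so far using collections.Counter. Only emotion.found and emotion.type from the example above are relied on.

from collections import Counter
from signal import pause

from pitop import Camera
from pitop.processing.algorithms.faces import EmotionClassifier, FaceDetector

face_detector = FaceDetector()
emotion_classifier = EmotionClassifier()
emotion_counts = Counter()


def tally_emotions(frame):
    face = face_detector(frame)
    emotion = emotion_classifier(face)

    if emotion.found:
        # Count each frame in which this emotion was detected
        emotion_counts[emotion.type] += 1
        print(dict(emotion_counts), end="\r", flush=True)


camera = Camera(resolution=(640, 480), flip_top_bottom=True)
camera.on_frame = tally_emotions

pause()
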