示例#1
0
def assemble_robot():
    """Build and return a Pitop fitted with a camera, pincers and navigation."""
    robot = Pitop()

    # Attach each hardware component in turn.
    components = (
        Camera(resolution=(640, 480), rotate_angle=90),
        PincerController(),
        NavigationController(left_motor_port="M3", right_motor_port="M0"),
    )
    for component in components:
        robot.add_component(component)

    return robot
示例#2
0
def assemble_robot():
    """Assemble a Pitop with a calibrated tilt/roll head, camera and ultrasonic sensor."""
    pitop = Pitop()

    # Head first, so it can be calibrated before the rest is attached.
    pitop.add_component(TiltRollHeadController())
    pitop.head.calibrate()

    camera = Camera(resolution=(640, 480), flip_top_bottom=True)
    pitop.add_component(camera)

    distance_sensor = UltrasonicSensor("A1")
    pitop.add_component(distance_sensor)

    return pitop
示例#3
0
from time import sleep

import cv2

from pitop import Camera

# "OpenCV" format yields frames cv2 can consume directly (BGR arrays)
cam = Camera(format="OpenCV")


def show_gray_image(image):
    """Convert a BGR frame to grayscale and display it in an OpenCV window."""
    cv2.imshow("frame", cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
    cv2.waitKey(1)  # Necessary to show image


# Use callback function for 60s
cam.on_frame = show_gray_image
sleep(60)


# Use get_frame indefinitely
try:
    while True:
        show_gray_image(cam.get_frame())

except KeyboardInterrupt:
    # Ctrl+C: close the OpenCV window before exiting
    cv2.destroyAllWindows()
示例#4
0
from pitop import Camera, Pitop
from pitop.labs import WebController

# The Pitop acts as the hub; the camera becomes reachable as `photobooth.camera`
photobooth = Pitop()
photobooth.add_component(Camera())


def save_photo(data):
    """Message handler: save the camera's current frame as '<name>.jpg'."""
    filename = f'{data["name"]}.jpg'
    photobooth.camera.current_frame().save(filename)


# Web UI: streams frames out, routes "save_photo" messages to the handler
controller = WebController(
    get_frame=photobooth.camera.get_frame,
    message_handlers={"save_photo": save_photo},
)

controller.serve_forever()
示例#5
0
from PIL import ImageDraw

from pitop import Camera

# Default format yields PIL images, which ImageDraw can draw on directly
cam = Camera()


def draw_red_cross_over_image(im):
    """Draw two corner-to-corner diagonal lines across *im* and return it."""
    # Use Pillow to draw a red cross over the image
    width, height = im.size
    draw = ImageDraw.Draw(im)
    draw.line((0, 0, width, height), fill=128, width=5)
    draw.line((0, height, width, 0), fill=128, width=5)
    return im


im = draw_red_cross_over_image(cam.get_frame())
im.show()  # opens the annotated frame in the default image viewer
示例#6
0
from pitop import Camera
from pitop.labs import MessagingBlueprint, VideoBlueprint, WebServer
from pitop.processing.algorithms import BallDetector, process_frame_for_line

camera = Camera()
ball_detector = BallDetector(format="PIL")

# Pipeline currently used by the processed-video feed ("line_detect" or "ball_detect")
selected_image_processor = "line_detect"


def change_processor(new_image_processor):
    """Message handler: switch the processed-video feed to another pipeline."""
    global selected_image_processor
    selected_image_processor = new_image_processor


def get_processed_frame():
    """Return the current frame run through the selected processor's robot_view.

    Falls back to empty bytes when processing fails *or* when the selected
    processor name is unknown, so the video feed degrades gracefully
    instead of handing the blueprint a None.
    """
    try:
        if selected_image_processor == "line_detect":
            return process_frame_for_line(camera.current_frame()).robot_view

        if selected_image_processor == "ball_detect":
            return ball_detector(camera.current_frame()).robot_view
    except Exception:
        # Best-effort: a processing error must not kill the stream
        return b""

    # Fix: an unknown selector previously fell through and returned None;
    # behave like the error path instead.
    return b""


image_processing_explorer = WebServer(
    blueprints=[
        VideoBlueprint(name="raw_video", get_frame=camera.get_frame),
        VideoBlueprint(name="processed_video", get_frame=get_processed_frame),
        MessagingBlueprint(message_handlers={"change_processor": change_processor}),
示例#7
0
from time import sleep

from pitop import Camera

# Record a 10s video to ~/Camera/

cam = Camera()

# Capture runs in the background; sleep to give it time to record
cam.start_video_capture()
sleep(10)
cam.stop_video_capture()
示例#8
0
from pitop.processing.algorithms.faces import EmotionClassifier, FaceDetector


def detect_emotion(frame):
    """Camera callback: classify the emotion on any detected face and report it."""
    face = face_detector(frame)
    emotion = emotion_classifier(face)

    if not emotion.found:
        print("Face not found!")
    else:
        # Overwrite the same console line with the ASCII face for the emotion
        print(f"{emotion_lookup[emotion.type]}", end="\r", flush=True)

    cv2.imshow("Emotion", emotion.robot_view)
    cv2.waitKey(1)


# Fix: the example's import block above does not provide cv2, Camera or
# pause, all of which are required at runtime (NameError otherwise).
import cv2  # noqa: F401  (used by the detect_emotion callback)
from signal import pause

from pitop import Camera

camera = Camera(resolution=(640, 480), flip_top_bottom=True)

face_detector = FaceDetector()
emotion_classifier = EmotionClassifier()
emotion_types = emotion_classifier.emotion_types
ascii_emotions = [":|", ":c", "D:<", ":)", ":(", ":O"]
# Pair each emotion type with its ASCII face positionally
emotion_lookup = dict(zip(emotion_types, ascii_emotions))

camera.on_frame = detect_emotion

pause()  # keep the process alive; work happens in the camera callback
from time import sleep

import cv2

from pitop import Camera, Pitop

import os

miniscreen = Pitop().miniscreen

cam = Camera(format="OpenCV", flip_top_bottom=True)
directory = "images/"
# Fix: cv2.imwrite fails silently (returns False) when the target
# directory does not exist, so create it up front.
os.makedirs(directory, exist_ok=True)
button = miniscreen.select_button
picture_count = 0

# Show every frame on screen and on the miniscreen; save a numbered JPEG
# whenever the select button is pressed; quit on 'q'.
while True:
    frame = cam.get_frame()

    cv2.imshow("Frame", frame)
    miniscreen.display_image(frame)
    if button.is_pressed:
        cv2.imwrite(f"{directory}image_{picture_count}.jpg", frame)
        print(f"Frame written to file with ID: {picture_count}\n")
        picture_count += 1
        sleep(0.5)  # debounce: avoid saving many frames per press
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cv2.destroyAllWindows()
示例#10
0
        draw_emoji(
            canvas=canvas, emoji="Looking", bounding_box=wide_bounding_box, offset_y=5
        )
        draw_text(canvas=canvas, text="Looking...")

    robot.miniscreen.display_image(image)


def run_emotion_and_face_detector(frame):
    """Camera callback: detect a face, classify its emotion, refresh the display."""
    detected_face = face_detector(frame)
    detected_emotion = emotion_classifier(detected_face)
    update_displayed_image(detected_face, detected_emotion)


robot = Pitop()
camera = Camera(resolution=(640, 480), flip_top_bottom=True)
robot.add_component(camera)

# Blank miniscreen-sized canvas that detector results are drawn onto
image = Image.new(
    robot.miniscreen.mode,
    robot.miniscreen.size,
)

w_ms, h_ms = robot.miniscreen.size

# Reserve a text banner at the top; its height is measured from a sample string
# NOTE(review): ImageFont.getsize was removed in Pillow 10 — may need getbbox; confirm pinned Pillow version
sample_text = "100% Happy"
_, top_section_height = text_font.getsize(sample_text)

# Left half of the area below the banner
left_bounding_box = (0, top_section_height, w_ms // 2, h_ms - top_section_height)
right_bounding_box = (
    w_ms // 2,
示例#11
0
from pitop import Camera, PanTiltController
from pitop.processing.algorithms.faces import FaceDetector


def track_face(frame):
    """Camera callback: show detections and steer the pan-tilt toward the face."""
    face = face_detector(frame)

    cv2.imshow("Faces", face.robot_view)
    cv2.waitKey(1)

    if not face.found:
        pan_tilt.track_object.stop()
        print("Cannot find face!")
        return

    face_center = face.center
    pan_tilt.track_object(face_center)
    print(f"Face center: {face_center}")


# Fix: cv2 (used inside track_face) and pause are required at runtime but
# the example's imports above do not provide them (NameError otherwise).
import cv2  # noqa: F401  (used by the track_face callback)
from signal import pause

face_detector = FaceDetector()

pan_tilt = PanTiltController(servo_pan_port="S0", servo_tilt_port="S3")
# Start from a level, centered head position
pan_tilt.tilt_servo.target_angle = 0
pan_tilt.pan_servo.target_angle = 0

camera = Camera(resolution=(640, 480))
camera.on_frame = track_face  # begin streaming frames into the tracker

pause()  # keep the process alive so callbacks keep firing
    # NOTE(review): fragment — the enclosing `def` and this example's imports
    # were lost above; this reads as the body of a head-tracking camera callback.
    face = face_detector(frame)
    robot_view = face.robot_view

    cv2.imshow("Faces", robot_view)
    cv2.waitKey(1)

    if face.found:
        face_angle = face.angle
        robot.head.track_head_angle(face_angle)
        print(f"Face angle: {face.angle}")
    else:
        # Stop sweeping the roll servo when no face is visible
        robot.head.roll.sweep(speed=0)
        print("Cannot find face!")


# NOTE(review): this example's import block was lost above; at runtime it
# needs Pitop, Camera, TiltRollHeadController, FaceDetector, cv2 and
# signal.pause — confirm against the original example source.
robot = Pitop()

robot.add_component(
    TiltRollHeadController(servo_roll_port="S0", servo_tilt_port="S3"))
robot.head.calibrate()
# Tilt up to a face-height viewing angle, roll level
robot.head.tilt.target_angle = 70
robot.head.roll.target_angle = 0

robot.add_component(Camera(resolution=(640, 480), flip_top_bottom=True))

face_detector = FaceDetector()

robot.camera.on_frame = track_face

pause()
示例#13
0
from signal import pause

from pitop import Camera, DriveController, Pitop
from pitop.processing.algorithms.line_detect import process_frame_for_line

# Assemble a robot
robot = Pitop()
robot.add_component(
    DriveController(left_motor_port="M3", right_motor_port="M0"))
robot.add_component(Camera())  # exposed below as robot.camera


# Set up logic based on line detection
def drive_based_on_frame(frame):
    """Steer the robot to follow a detected line; stop when the line is lost."""
    processed_frame = process_frame_for_line(frame)

    if processed_frame.line_center is None:
        print("Line is lost!", end="\r")
        robot.drive.stop()
        return

    print(f"Target angle: {processed_frame.angle:.2f} deg ", end="\r")
    robot.drive.forward(0.25, hold=True)
    robot.drive.target_lock_drive_angle(processed_frame.angle)
    robot.miniscreen.display_image(processed_frame.robot_view)


# On each camera frame, detect a line
robot.camera.on_frame = drive_based_on_frame

pause()  # keep the script alive; work happens in the camera callback
from pitop import Camera, Pitop

# Mirror the camera feed straight onto the miniscreen
camera = Camera()
pitop = Pitop()
camera.on_frame = pitop.miniscreen.display_image
from datetime import datetime
from time import localtime, sleep, strftime

from pitop import Camera

# Example code for Camera
# Records videos of any motion captured by the camera

cam = Camera()

# Timestamp (seconds since epoch) of the most recent motion event
last_motion_detected = None


def motion_detected():
    """Motion handler: record video until 3s pass with no further motion.

    Every call refreshes the shared `last_motion_detected` timestamp; the
    first call while idle starts a capture and blocks until motion has been
    quiet for 3 seconds, then stops it.

    NOTE(review): nothing in this example registers the handler with the
    camera (e.g. a motion callback) — likely lost in truncation; confirm
    against the original example source.
    """
    global last_motion_detected

    last_motion_detected = datetime.now().timestamp()

    # Fix: `is False` misses falsy non-bool returns; `not` is the idiomatic
    # (and PEP 8 recommended) test.
    if not cam.is_recording():

        print("Motion detected! Starting recording...")
        output_file_name = f"/home/pi/Desktop/My Motion Recording {strftime('%Y-%m-%d %H:%M:%S', localtime(last_motion_detected))}.avi"
        cam.start_video_capture(output_file_name=output_file_name)

        # Keep recording while new motion keeps refreshing the timestamp
        while (datetime.now().timestamp() - last_motion_detected) < 3:
            sleep(1)

        cam.stop_video_capture()
        print(f"Recording completed - saved to {output_file_name}")

示例#16
0
from signal import pause

import cv2

from pitop import Camera
from pitop.processing.algorithms.faces import FaceDetector


def find_faces(frame):
    """Camera callback: display detections and print the face's geometry."""
    face = face_detector(frame)

    cv2.imshow("Faces", face.robot_view)
    cv2.waitKey(1)

    if not face.found:
        print("Cannot find face!")
    else:
        print(f"Face angle: {face.angle} \n"
              f"Face center: {face.center} \n"
              f"Face rectangle: {face.rectangle} \n")


camera = Camera(resolution=(640, 480), flip_top_bottom=True)
face_detector = FaceDetector()

# Run face detection on every frame
camera.on_frame = find_faces

pause()  # keep the process alive for the callback
示例#17
0
from pitop import Camera, DriveController, Pitop
from pitop.labs import RoverWebController

# Assemble a rover: motors plus a camera on a Pitop hub
rover = Pitop()
rover.add_component(DriveController())
rover.add_component(Camera())

# Web UI that streams the camera feed and drives the motors
rover_controller = RoverWebController(
    get_frame=rover.camera.get_frame, drive=rover.drive
)

rover_controller.serve_forever()
from pitop import Camera

cam = Camera()

# Continuously print the colour of the top-left pixel of each frame
while True:
    image = cam.get_frame()
    print(image.getpixel((0, 0)))