def main():
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()
    # Initialize the dlib frontal face detector
    detector = dlib.get_frontal_face_detector()

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections; the second argument is upsample_num_times
        dets = detector(img, 1)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        # Show image (reverse the channel order because OpenCV expects BGR)
        cv2.imshow("Frame", img[..., ::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
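The examples in this listing assume a shared preamble that is omitted above. A minimal sketch, modeled on the preamble of code example #10 below (the lib module paths are taken from that example; the color constant is an assumption):

#!/usr/bin/env python
import cv2
import dlib
import sys
sys.path.append('..')
from lib.camera_v2 import Camera
from lib.robot import Robot
from lib.ros_environment import ROSEnvironment

# Assumed RGB color for the detection rectangles
color_green = (0, 255, 0)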
Code example #2
def main():
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Camera focal length
    focal_length = 640
    # Start camera
    camera.start()
    # Initialize face detector
    face_detector = FaceDetector()
    # Load the 68-point facial landmark predictor
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
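FaceDetector comes from the course library and is not defined in this listing. A rough stand-in for its detect() method, assuming it wraps dlib's frontal face detector (the estimate_pose and draw_pose methods used in later examples are omitted):

class FaceDetector:
    # Hypothetical minimal stand-in for the library class used in these examples
    def __init__(self):
        self._detector = dlib.get_frontal_face_detector()

    def detect(self, img, upsample_num_times=1):
        # Returns dlib rectangles exposing left()/top()/right()/bottom()
        return self._detector(img, upsample_num_times)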
Code example #3
def main():
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()

    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    # Initialize face detector
    face_detector = FaceDetector()
    # Load the 68-point facial landmark predictor
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Loop counter
    cnt = 0

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        if len(dets) > 0:
            face_tracking = None
            # Start with a distance larger than any possible pixel distance
            distanceFromCenter_min = 1000
            # Find the face nearest the image center
            for face in dets:
                # Draw Rectangle
                cv2.rectangle(img, (face.left(), face.top()),
                              (face.right(), face.bottom()), color_green, 3)

                face_x = (face.left() + face.right()) / 2

                # Horizontal distance between the face center and the image center (0.5 * image width)
                distanceFromCenter = abs(face_x - camera.width / 2)

                # Find a face that has the smallest distance
                if distanceFromCenter < distanceFromCenter_min:
                    distanceFromCenter_min = distanceFromCenter
                    face_tracking = face

            # Estimate pose
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, face_tracking)
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Code example #4
def main():
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Camera focal length
    focal_length = 640
    # Start camera
    camera.start()
    # Initialize face detector
    face_detector = FaceDetector()
    # Load the 68-point facial landmark predictor
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Yaw thresholds for deciding whether the user looks right or left
    right_threshold = 0.3
    left_threshold = -0.3
    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        # We only use 1 face to estimate pose
        if (len(dets) > 0):
            # Estimate pose
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, dets[0])
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

            # The rotation about the z-axis (index 2) approximates the yaw
            print(rotation_vector)
            yaw = rotation_vector[2]
            print(yaw)

            # Condition for looking right
            if yaw > right_threshold:
                print('You are looking right.')
            # Condition for looking left
            elif yaw < left_threshold:
                print('You are looking left.')

        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
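Reading rotation_vector[2] is a quick proxy for the yaw. A more explicit conversion, sketched under the assumption that estimate_pose returns an OpenCV Rodrigues rotation vector, goes through the rotation matrix:

import cv2
import numpy as np

def yaw_from_rotation_vector(rotation_vector):
    # Expand the Rodrigues vector into a 3x3 rotation matrix
    rotation_matrix, _ = cv2.Rodrigues(rotation_vector)
    # Yaw (rotation about the vertical axis) in a ZYX Euler decomposition
    return np.arctan2(rotation_matrix[1, 0], rotation_matrix[0, 0])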
Code example #5
def main():
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()

    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    # Initialize the dlib frontal face detector
    detector = dlib.get_frontal_face_detector()

    # Loop counter
    cnt = 0

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = detector(img, 1)
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        if len(dets) > 0:
            tracked_face = dets[0]
            tracked_face_x = (tracked_face.left() + tracked_face.right()) / 2
            tracked_face_y = (tracked_face.top() + tracked_face.bottom()) / 2
            # Convert the 2D pixel position to a 3D point in the camera coordinate system
            (x, y, z) = camera.convert2d_3d(tracked_face_x, tracked_face_y)
            # Convert the 3D point from the camera coordinate system to the robot coordinate system
            (x, y, z) = camera.convert3d_3d(x, y, z)
            # Move the robot so that it tracks the face
            robot.lookatpoint(x, y, z, 4)

        # Show image
        cv2.imshow("Frame", img[..., ::-1])

        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Code example #6
def main():
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()

    # Loop
    while True:
        # Get image from camera; getImage returns an image
        img = camera.getImage()
        # Use opencv to show image on window named "Frame"
        cv2.imshow("Frame", img[...,::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Code example #7
def main():
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Camera focal length
    focal_length = 640
    # Start camera
    camera.start()
    # Initialize face detector
    face_detector = FaceDetector()
    # Load the 68-point facial landmark predictor
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        # We only use the first face to estimate pose
        if len(dets) > 0:
            # Estimate the pose of the detected face
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, dets[0])
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)
            print("rotation_vector")
            print(rotation_vector)
            print("translation_vector")
            print(translation_vector)
        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Code example #8
def main():
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Camera focal length
    focal_length = 640
    # Start camera
    camera.start()

    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    # Initialize face detector
    face_detector = FaceDetector()
    # Load the 68-point facial landmark predictor
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Loop counter
    cnt = 1
    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        # We only use the first face to estimate pose
        if len(dets) > 0:
            det0 = dets[0]
            # Estimate pose
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, det0)
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

            # Convert the 2D face center to 3D coordinates in the camera frame
            (x, y, z) = camera.convert2d_3d((det0.left() + det0.right()) / 2,
                                            (det0.top() + det0.bottom()) / 2)
            print(x, y, z, 'on camera axis')
            # Convert camera-frame coordinates to robot-frame coordinates
            (x, y, z) = camera.convert3d_3d(x, y, z)
            print(x, y, z, 'on robot axis')
            # Move the robot to look at the point
            robot.lookatpoint(x, y, z, 4)

        cv2.imshow("Frame", img[..., ::-1])
        key = cv2.waitKey(1)
        if key > 0:
            break
Code example #9
def main():
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()
    # Initialize ball detector
    ball_detector = BallDetector()

    # Loop
    while True:
        # Get image
        img = camera.getImage()
        # Detect the ball; returns the annotated image and the ball's center
        (img, center) = ball_detector.detect(img, 640)
        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Print the center
        print(center)
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
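BallDetector is another course-library class that is not shown here. A rough color-thresholding stand-in (the HSV range is an assumption, and the second argument is only assumed to be the image width):

class BallDetector:
    # Hypothetical stand-in: find the largest orange-ish blob via HSV thresholding
    def detect(self, img, width=640):
        hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        mask = cv2.inRange(hsv, (5, 120, 120), (15, 255, 255))
        # findContours returns 2 or 3 values depending on the OpenCV version;
        # the contour list is always second from the end
        contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
        center = None
        if contours:
            largest = max(contours, key=cv2.contourArea)
            (x, y), radius = cv2.minEnclosingCircle(largest)
            center = (int(x), int(y))
            cv2.circle(img, center, int(radius), (0, 255, 0), 2)
        return img, center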
Code example #10
#!/usr/bin/env python
import cv2
import sys
from example_2_ball_detector import BallDetector
sys.path.append('..')
from lib.camera_v2 import Camera
from lib.robot import Robot
from lib.ros_environment import ROSEnvironment

# Initialize camera
camera = Camera()
# Initialize robot
robot = Robot()


def main():
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Start camera
    camera.start()
    # Start robot
    robot.start()
    # Initialize ball detector
    ball_detector = BallDetector()

    # Loop
    while True:
        # Get image from camera
        img = camera.getImage()
        # Detect the ball; returns the annotated image and the ball's center
        (img, center) = ball_detector.detect(img, 640)
Code example #11
def main():
    ROSEnvironment()
    camera = Camera()
    camera.start()
    robot = Robot()
    robot.start()

    # Get image from camera
    cam_image = camera.getImage()

    # Get width and height of image
    input_image = cam_image
    width = input_image.shape[1]
    height = input_image.shape[0]

    # Load the deep neural network from the weights and configuration files
    net = cv2.dnn.readNet(weight_path, cfg_path)

    #creates a "bob" that is the input image after mean subtraction, normalizing, channel swapping
    #0.00392 is the scale factor
    #(416,416) is the size of the output image
    #(0,0,0) are the mean values that will be subtracted for each channel RGB
    blob = cv2.dnn.blobFromImage(input_image,
                                 0.00392, (416, 416), (0, 0, 0),
                                 True,
                                 crop=False)

    # Feed the blob into the neural network
    net.setInput(blob)

    # Get the output layers ('yolo_82', 'yolo_94', 'yolo_106');
    # the output layers contain the detection/prediction information
    layer_names = net.getLayerNames()
    # getUnconnectedOutLayers() returns the indices of the unconnected (output)
    # layers, e.g. [[200], [227], [254]]; layer_names[i[0] - 1] maps each
    # index to the corresponding layer name
    output_layers = [
        layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()
    ]

    # Run a forward pass to compute the output of the selected layers;
    # returns predictions/detections at the 32, 16, and 8 downsampling scales
    preds = net.forward(output_layers)

    # Initialize the lists that collect class ids, confidence values, and bounding boxes
    class_ids = []
    confidence_values = []
    bounding_boxes = []

    # Confidence threshold and threshold for non-maximum suppression
    conf_threshold = 0.5
    nms_threshold = 0.4

    # For each scale, go through the detections
    for pred in preds:
        for detection in pred:
            # Use the max class score as the confidence
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            # Keep the detection if its confidence is greater than the threshold
            if confidence > conf_threshold:
                # Compute x, y, width, height from the normalized outputs
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = center_x - w / 2
                y = center_y - h / 2
                class_ids.append(class_id)
                confidence_values.append(float(confidence))
                bounding_boxes.append([x, y, w, h])

    # Apply non-maximum suppression to remove overlapping boxes
    indices = cv2.dnn.NMSBoxes(bounding_boxes, confidence_values,
                               conf_threshold, nms_threshold)

    # Draw the results
    # tracked_object flags whether an object is already being tracked
    tracked_object = 0
    for i in indices:
        i = i[0]
        box = bounding_boxes[i]
        x = box[0]
        y = box[1]
        w = box[2]
        h = box[3]
        center_x = x + w / 2.0
        center_y = y + h / 2.0
        classid = class_ids[i]
        class_name = str(classes[classid])

        print(class_name)
        conf_value = confidence_values[i]
        draw_boundingbox(input_image, classid, conf_value, round(x), round(y),
                         round(x + w), round(y + h))

    # Show image
    cv2.imshow("Object Detection Window", input_image[..., ::-1])
    key = cv2.waitKey(0)
    cv2.imwrite("detected_object.jpg", input_image)
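This example relies on several names defined outside the listing: weight_path and cfg_path (the YOLO model files), classes (the class-name list), np (NumPy), and draw_boundingbox. A minimal sketch of those assumptions:

import cv2
import numpy as np

# Assumed model files and class list (YOLOv3 trained on COCO)
weight_path = './yolov3.weights'
cfg_path = './yolov3.cfg'
classes = open('./coco.names').read().strip().split('\n')

def draw_boundingbox(img, class_id, confidence, x1, y1, x2, y2):
    # Hypothetical helper: draw the box and the class label for one detection
    label = '{}: {:.2f}'.format(classes[class_id], confidence)
    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    cv2.putText(img, label, (x1, y1 - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)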
Code example #12
def main():
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()
    # Initialize face detector
    face_detector = FaceDetector()
    # Load the 68-point facial landmark predictor
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    robot.move(0, 0.5)

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        # We only use 1 face to estimate pose
        if (len(dets) > 0):
            # Estimate pose
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, dets[0])
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

            # The rotation about the z-axis (index 2) approximates the yaw
            print(rotation_vector)
            yaw = rotation_vector[2]

            # Remember the current position
            print("Pan angle is", robot.getPosition()[0],
                  "Tilt angle is", robot.getPosition()[1])
            current_pan = robot.getPosition()[0]
            current_tilt = robot.getPosition()[1]

            # Condition for looking right
            if yaw > 0.3:
                print('You are looking right.')
                # Motion for looking right
                robot.move(0.5, 0.5)

            # Condition for looking left
            elif yaw < -0.3:
                print('You are looking left.')
                # Motion for looking left
                robot.move(-0.5, 0.5)

            time.sleep(3)

            # Look back at the stored position
            robot.move(current_pan, current_tilt)
            time.sleep(5)

        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Code example #13
def main():
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()
    # Initialize face detector
    face_detector = FaceDetector()
    # Load the 68-point facial landmark predictor
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    robot.move(0, 0.3)


    # The time when the look-left/right motion started
    motion_start_time = None

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        # We only use the first face to estimate pose
        if len(dets) > 0:
            # Estimate pose
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, dets[0])
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

            # The rotation about the z-axis (index 2) approximates the yaw
            print(rotation_vector)
            yaw = rotation_vector[2]

            # Print the current position
            print("Pan angle is", robot.getPosition()[0],
                  "Tilt angle is", robot.getPosition()[1])

            # Condition when the user is looking right
            if yaw > 0.3 and motion_start_time is None:
                print('You are looking right.')
                # Store the current position in current_pan and current_tilt
                current_pos = robot.getPosition()
                current_pan = current_pos[0]
                current_tilt = current_pos[1]

                # Motion for looking right
                robot.move(0.8, 0.5)

                motion_start_time = current_time()

            # Condition when the user is looking left
            elif yaw < -0.3 and motion_start_time is None:
                print('You are looking left.')
                # Store the current position in current_pan and current_tilt
                current_pos = robot.getPosition()
                current_pan = current_pos[0]
                current_tilt = current_pos[1]

                # Motion for looking left
                robot.move(-0.8, 0.5)

                motion_start_time = current_time()

            if motion_start_time is not None:
                print(current_time() - motion_start_time)

        # After the motion runs, check whether 3 seconds have passed
        if motion_start_time is not None and current_time() - motion_start_time > 3:
            # Move the robot back to the stored position
            robot.move(current_pan, current_tilt)
            motion_start_time = None

        sleep(0.05)
        # Show image
        cv2.imshow("Frame", img[...,::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
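current_time() and sleep() are assumed helpers in this example; a minimal sketch using the standard library:

import time
from time import sleep

def current_time():
    # Seconds since the epoch, as a float
    return time.time()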
Code example #14
def main():
    faceInCenter_count = 0
    current_pan = 0
    current_tilt = 0
    # We need to initialize the ROS environment so that the robot and camera can connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()
    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    # Initialize face detector
    face_detector = FaceDetector()
    # Load the 68-point facial landmark predictor
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')
    # Face detection result
    dets = None
    # Current tracking state
    Tracking = False
    # The time when the look-left/right motion started
    motion_start_time = None
    # Loop counter
    cnt = 0
    # Loop
    while True:
        # Get image
        img = camera.getImage()
        # Optionally skip this frame
        if frame_skip(img):
            continue
        # Get face detections
        dets = face_detector.detect(img)

        # If at least one face was detected
        if len(dets) > 0:

            # Select the first face detected
            tracked_face = dets[0]
            # Get the x, y position of the face center
            tracked_face_x = (tracked_face.left() + tracked_face.right()) / 2
            tracked_face_y = (tracked_face.top() + tracked_face.bottom()) / 2

            # Estimate head pose
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, tracked_face)

            # Draw bounding box
            cv2.rectangle(img, (tracked_face.left(), tracked_face.top()),
                          (tracked_face.right(), tracked_face.bottom()), color_green, 3)
            # Draw head pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

            # Check if the head is in the center; returns how many times the head has been in the center
            faceInCenter_count = faceInCenter(camera, tracked_face_x, tracked_face_y, faceInCenter_count)
            print(faceInCenter_count)
            print("Face has been in the center for {} times".format(faceInCenter_count))

            # Track while the face has been in the center fewer than 5 times and no head motion is active
            if faceInCenter_count < 5 and motion_start_time is None:
                Tracking = True
            else:
                Tracking = False

            # Start tracking
            if Tracking:
                print("Tracking the Person")
                # Convert the 2D pixel coordinates to 3D coordinates in the camera frame
                (x, y, z) = camera.convert2d_3d(tracked_face_x, tracked_face_y)
                # Convert camera-frame coordinates to robot-frame coordinates
                (x, y, z) = camera.convert3d_3d(x, y, z)
                # Move the robot to track the face
                robot.lookatpoint(x, y, z, 1)

            # When tracking is off, estimate the head pose and run a head motion if the conditions are met
            elif Tracking is False:
                print("Stopped Tracking, Starting Head Pose Estimation")
                # Yaw is the rotation of the face about the z-axis
                yaw = rotation_vector[2]

                # Condition for the user looking towards the right
                if yaw > 0.3 and motion_start_time is None:
                    print('You are looking towards the right.')
                    # Remember the current position
                    current_position = robot.getPosition()
                    current_pan = current_position[0]
                    current_tilt = current_position[1]
                    print("Starting head motion to look right")
                    # Motion for looking right
                    robot.move(0.8, 0.5)
                    motion_start_time = current_time()

                # Condition for the user looking towards the left
                elif yaw < -0.3 and motion_start_time is None:
                    print('You are looking towards the left.')
                    # Remember the current position
                    current_position = robot.getPosition()
                    current_pan = current_position[0]
                    current_tilt = current_position[1]
                    print("Starting head motion to look left")
                    # Motion for looking left
                    robot.move(-0.8, 0.5)
                    motion_start_time = current_time()

        # While a head motion is active, report how long it has been running
        if motion_start_time is not None:
            print("Look motion activated and it has been {} seconds".format(current_time() - motion_start_time))

        # After 3 seconds, return to the stored position
        if motion_start_time is not None and (current_time() - motion_start_time) > 3:
            print("Robot is going back")
            # Move the robot back to the stored position
            robot.move(current_pan, current_tilt)
            motion_start_time = None
            # Tracking = True

        # Count loop iterations; start tracking again once enough have passed and no motion is active
        cnt = cnt + 1
        if cnt > 10 and motion_start_time is None:
            Tracking = True
        sleep(0.08)

        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
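frame_skip() and faceInCenter() are also course-library helpers that the listing does not define. Rough stand-ins under assumed semantics (never skip a frame; count consecutive frames in which the face center stays near the image center):

def frame_skip(img):
    # Hypothetical stand-in: never skip a frame
    return False

def faceInCenter(camera, face_x, face_y, count, tolerance=50):
    # Hypothetical stand-in: increment the count while the face center stays
    # within `tolerance` pixels of the image center (camera.height is assumed
    # to exist alongside camera.width), otherwise reset the count
    if (abs(face_x - camera.width / 2) < tolerance and
            abs(face_y - camera.height / 2) < tolerance):
        return count + 1
    return 0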