Example #1
class client:
    def __init__(self):
        self.robot = Robot()
        # rospy.Timer(rospy.Duration(1.0), self.robotUpdate)

    def robotUpdate(self, event):
        # self.robot.test()
        self.robot.move(0, 0)
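Example #1 is only a fragment: the timer line is commented out and nothing starts the robot. A minimal sketch of how it might be wired up, following the rospy.Timer pattern from Example #4 (the 20 Hz rate, the start() call, and placing ROSEnvironment() in the constructor are assumptions):

import rospy

class client:
    def __init__(self, rate=20):  # rate in Hz; the default is an assumption
        ROSEnvironment()          # connect to ROS first, as the other examples do
        self.robot = Robot()
        self.robot.start()
        # Call robotUpdate periodically, mirroring Example #4's timer
        rospy.Timer(rospy.Duration(1.0 / rate), self.robotUpdate)

    def robotUpdate(self, event):
        self.robot.move(0, 0)  # hold still; replace with real pan/tilt values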
Example #2
def main():
    # We need to initialize the ROS environment for the Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()

    # TODO: change the values in move
    robot.move(0, 0)
    time.sleep(0.1)
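Building on Example #2, a short sketch that sweeps the pan joint through a few positions; the radian units and the settle time are assumptions based on Example #4's parameter comments:

import time

def sweep_demo():
    ROSEnvironment()  # connect robot and camera to ROS, as above
    robot = Robot()
    robot.start()
    # Sweep the pan joint left, center, right, center (values assumed to be radians)
    for pan in (-0.3, 0.0, 0.3, 0.0):
        robot.move(pan, 0)
        time.sleep(1)  # give the head time to settle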
Example #3
def main():
    # We need to initialize the ROS environment for the Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()

    time.sleep(1)  # wait a second

    #TODO: remember the current position
    curr_pos = robot.getPosition()
    curr_pan = curr_pos[0]
    curr_tilt = curr_pos[1]

    #TODO: look somewhere else other than the current position
    robot.move(-0.3, -0.3)
    time.sleep(1)  # wait a second

    #TODO: return to looking at the stored position
    robot.move(curr_pan, curr_tilt)
    time.sleep(1)  # wait a second
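The look-away-and-return pattern in Example #3 can be packaged as a helper; a sketch using only the calls shown above, assuming getPosition() returns a (pan, tilt) pair as Example #3 suggests:

import time

def glance(robot, pan, tilt, hold=1.0):
    # Remember where the head was pointing
    prev_pan, prev_tilt = robot.getPosition()
    # Look at the requested position, hold, then return
    robot.move(pan, tilt)
    time.sleep(hold)
    robot.move(prev_pan, prev_tilt)
    time.sleep(hold)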
Example #4
class BallTrackingSystem:
    def __init__(self):
        self.initParam()
        self.pan_speed = 0
        self.tilt_speed = 0

        # Initialize the robot, ball detector, and camera
        self.robot = Robot()
        self.ball_detector = BallDetector()
        self.camera = Camera(self.imageCallback)

        # move robot head to center
        self.robot.center()

        # Start a timer that calls robotUpdate at self.rate Hz
        rospy.Timer(rospy.Duration(1.0 / self.rate), self.robotUpdate)

    def robotUpdate(self, event):
        # self.robot.test()
        self.robot.move(self.pan_speed, self.tilt_speed)

    def imageCallback(self, frame):
        results = self.ball_detector.detect(frame, 640)
        frame = results[0]
        if results[1] is not None:
            self.set_joint_cmd(results)
        return frame

    def set_joint_cmd(self, msg):
        ball = msg[1]
        frame = msg[0]
        (image_height, image_width) = frame.shape[:2]

        target_offset_x = ball[0] - image_width / 2
        target_offset_y = ball[1] - image_height / 2

        try:
            percent_offset_x = float(target_offset_x) / (float(image_width) / 2.0)
            percent_offset_y = float(target_offset_y) / (float(image_height) / 2.0)
        except ZeroDivisionError:
            percent_offset_x = 0
            percent_offset_y = 0

        if abs(percent_offset_x) > self.pan_threshold:
            if target_offset_x > 0:
                self.pan_speed = min(
                    self.max_joint_speed,
                    max(0, self.gain_pan * abs(percent_offset_x)))
            else:
                self.pan_speed = -1 * min(
                    self.max_joint_speed,
                    max(0, self.gain_pan * abs(percent_offset_x)))
        else:
            self.pan_speed = 0

        if abs(percent_offset_y) > self.tilt_threshold:
            if target_offset_y > 0:
                self.tilt_speed = min(
                    self.max_joint_speed,
                    max(0, self.gain_tilt * abs(percent_offset_y)))
            else:
                self.tilt_speed = -1 * min(
                    self.max_joint_speed,
                    max(0, self.gain_tilt * abs(percent_offset_y)))
        else:
            self.tilt_speed = 0

    def initParam(self):
        self.rate = rospy.get_param("~rate", 20)

        # Joint speeds are given in radians per second
        self.max_joint_speed = rospy.get_param('~max_joint_speed', 0.1)

        # The pan/tilt thresholds indicate what percentage of the image window
        # the ROI needs to be off-center before we make a movement
        self.pan_threshold = rospy.get_param("~pan_threshold", 0.05)
        self.tilt_threshold = rospy.get_param("~tilt_threshold", 0.05)

        # The gain_pan and gain_tilt parameters determine how responsive the
        # servo movements are. If these are set too high, oscillation can result.
        self.gain_pan = rospy.get_param("~gain_pan", 1.0)
        self.gain_tilt = rospy.get_param("~gain_tilt", 1.0)

        self.max_pan_angle_radian = rospy.get_param("~max_pan_angle_radian",
                                                    1.0)
        self.max_tilt_angle_radian = rospy.get_param("~max_tilt_angle_radian",
                                                     1.0)
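The speed computation in set_joint_cmd is a proportional controller with a deadband and a clamp: offsets inside the threshold produce no motion, and larger offsets produce a speed proportional to the offset, capped at max_joint_speed. The same rule factored into a standalone function (the name joint_speed is illustrative):

def joint_speed(percent_offset, threshold, gain, max_speed):
    # Inside the deadband: hold still
    if abs(percent_offset) <= threshold:
        return 0.0
    # Proportional speed, clamped to the maximum
    speed = min(max_speed, gain * abs(percent_offset))
    # Move in the direction of the offset
    return speed if percent_offset > 0 else -speed

With this helper, set_joint_cmd would reduce to two calls, e.g. self.pan_speed = joint_speed(percent_offset_x, self.pan_threshold, self.gain_pan, self.max_joint_speed).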
Example #5
def main():
    # We need to initialize the ROS environment for the Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()
    # Initialize face detector
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    robot.move(0, 0.5)

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        # We only use 1 face to estimate pose
        if len(dets) > 0:
            # Estimate pose
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, dets[0])
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

            #TODO: find a yaw value from rotation_vector
            print(rotation_vector)
            yaw = rotation_vector[2]

            #TODO: remember current position
            current_pan = robot.getPosition()[0]
            current_tilt = robot.getPosition()[1]
            print("Pan angle is", current_pan, "Tilt angle is", current_tilt)

            #TODO: insert the condition for looking right
            if yaw > 0.3:
                print('You are looking right.')
                #TODO: add motion for looking right
                robot.move(0.5, 0.5)

            #TODO: insert the condition for looking left
            elif yaw < -0.3:
                print('You are looking left.')
                #TODO: add motion for looking left
                robot.move(-0.5, 0.5)

            time.sleep(3)

            #TODO: return to the stored position
            robot.move(current_pan, current_tilt)
            time.sleep(5)

        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Example #6
def main():
    # We need to initialize the ROS environment for the Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()
    # Initialize face detector
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    robot.move(0, 0.3)


    # the time when motion runs
    motion_start_time = None

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()), (det.right(), det.bottom()), color_green, 3)

        # We only use 1 face to estimate pose
        if len(dets) > 0:
            # Estimate pose
            (success, rotation_vector, translation_vector, image_points) = face_detector.estimate_pose(img, dets[0])
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector, translation_vector, image_points)

            # The yaw is the rotation about the z-axis: the third component of rotation_vector
            print(rotation_vector)
            yaw = rotation_vector[2]

            # Print the current position
            print("Pan angle is", robot.getPosition()[0], "Tilt angle is", robot.getPosition()[1])

            # Condition when the user is looking right
            if yaw > 0.3 and motion_start_time is None:
                print('You are looking right.')
                #TODO: store the current position in current_pan and current_tilt
                current_pos = robot.getPosition()
                current_pan = current_pos[0]
                current_tilt = current_pos[1]

                #TODO: add motion for looking right (values mirror Example #7)
                robot.move(0.8, 0.5)

                motion_start_time = current_time()

            # Condition when the user is looking left
            elif yaw < -0.3 and motion_start_time is None:
                print('You are looking left.')
                #TODO: store the current position in current_pan and current_tilt
                current_pos = robot.getPosition()
                current_pan = current_pos[0]
                current_tilt = current_pos[1]

                #TODO: add motion for looking left (values mirror Example #7)
                robot.move(-0.8, 0.5)

                motion_start_time = current_time()

            if motion_start_time is not None:
                print(current_time() - motion_start_time)

        # After the motion runs, check if 3 seconds have passed.
        if motion_start_time is not None and current_time() - motion_start_time > 3:
            #TODO: move the robot so that it returns to the stored current position
            robot.move(current_pan, current_tilt)
            motion_start_time = None

        sleep(0.05)
        # Show image
        cv2.imshow("Frame", img[...,::-1])
        # Close if key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Example #7
def main():
    faceInCenter_count = 0
    current_pan = 0
    current_tilt = 0
    # We need to initialize the ROS environment for the Robot and camera to connect/communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()
    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()
    # Initialize face detector
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')
    #face detection result
    dets = None
    # current tracking state
    Tracking = False
    # the time when motion for looking left/right runs
    motion_start_time = None
    cnt = 0
    #loop
    while True:
        #get image
        img = camera.getImage()
        if frame_skip(img):
            continue
        # Detect faces
        dets = face_detector.detect(img)

        # If the number of faces detected is greater than 0
        if len(dets) > 0:

            # We select the first face detected
            tracked_face = dets[0]
            # Get the x, y center of the face
            tracked_face_X = (tracked_face.left() + tracked_face.right()) / 2
            tracked_face_y = (tracked_face.top() + tracked_face.bottom()) / 2

            # Estimate head pose
            (success, rotation_vector, translation_vector, image_points) = face_detector.estimate_pose(img, tracked_face)

            # Draw bounding box
            cv2.rectangle(img,(tracked_face.left(), tracked_face.top()), (tracked_face.right(), tracked_face.bottom()), color_green, 3)
            # Draw head pose
            img = face_detector.draw_pose(img, rotation_vector, translation_vector, image_points)

            # Check if the head is in the center; returns how many times the head has been in the center
            faceInCenter_count = faceInCenter(camera, tracked_face_X, tracked_face_y, faceInCenter_count)
            print(faceInCenter_count)
            print("Face has been in the center {} times".format(faceInCenter_count))

            # We track when the head has been in the center for a certain period of time and no head motion is active
            if faceInCenter_count < 5 and motion_start_time is None:
                Tracking = True
            else:
                Tracking = False

            # Start tracking
            if Tracking:
                print("Tracking the person")
                #TODO: convert 2d coordinates to 3d coordinates on the camera axis
                (x, y, z) = camera.convert2d_3d(tracked_face_X, tracked_face_y)
                #TODO: convert 3d coordinates on the camera axis to 3d coordinates on the robot axis
                (x, y, z) = camera.convert3d_3d(x, y, z)
                #TODO: move the robot to track the face
                robot.lookatpoint(x, y, z, 1)

            # When tracking is turned off, estimate the head pose and perform head motion if the conditions are met
            else:
                print("Stopped tracking, starting head pose estimation")
                # Yaw is the rotation of the face about the z-axis
                yaw = rotation_vector[2]

                # Condition for the user looking towards the right
                if yaw > 0.3 and motion_start_time is None:
                    print('You are looking towards the right.')
                    #TODO: Remember the current position
                    current_position = robot.getPosition()
                    current_pan = current_position[0]
                    current_tilt = current_position[1]
                    print("Starting head motion to look right")
                    #TODO: add motion for looking right
                    robot.move(0.8, 0.5)
                    motion_start_time = current_time()

                # Condition for the user looking towards the left
                elif yaw < -0.3 and motion_start_time is None:
                    print('You are looking towards the left.')
                    #TODO: Remember the current position
                    current_position = robot.getPosition()
                    current_pan = current_position[0]
                    current_tilt = current_position[1]
                    print("Starting head motion to look left")
                    #TODO: add motion for looking left
                    robot.move(-0.8, 0.5)
                    motion_start_time = current_time()

        # When head motion is active we report the elapsed time
        if motion_start_time is not None:
            print("Look motion activated, and it's been {} seconds".format(current_time() - motion_start_time))

        # After 3 seconds, return to the stored position
        if motion_start_time is not None and (current_time() - motion_start_time) > 3:
            # Look at the position that was stored
            print("Robot is going back")
            #TODO: make the robot move to the stored current position
            robot.move(current_pan, current_tilt)
            motion_start_time = None
            #Tracking = True
            #Tracking = True

        # Resume tracking once enough iterations have passed and no motion is running
        # (the counter must be incremented every loop; in the original it only
        # ever incremented inside the branch it was meant to trigger)
        cnt = cnt + 1
        if cnt > 10 and motion_start_time is None:
            Tracking = True
        sleep(0.08)

        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Close if a key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
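Examples #6 and #7 call current_time(), frame_skip(), and faceInCenter() without showing their definitions. A minimal sketch of plausible implementations; everything here, including the pixel tolerance and the assumed 640x480 frame size, is an assumption rather than the tutorial's actual helper module:

import time

def current_time():
    # Wall-clock seconds; the examples only use differences between calls
    return time.time()

def frame_skip(img):
    # Skip iterations where the camera returned no frame (assumed behavior)
    return img is None

def faceInCenter(camera, face_x, face_y, count, tolerance=50):
    # Count consecutive frames in which the face stays near the image center
    center_x, center_y = 320, 240  # assumes 640x480 frames, as in Example #4
    if abs(face_x - center_x) < tolerance and abs(face_y - center_y) < tolerance:
        return count + 1
    return 0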