def main():
    """Demo: remember the robot's current pose, glance away, then return to it."""
    # Bring up the ROS environment so the robot and camera can communicate.
    ROSEnvironment()

    # Create and start the robot.
    robot = Robot()
    robot.start()
    time.sleep(1)  # give the hardware a moment to settle

    # Remember where the robot is currently looking (pan, tilt).
    home = robot.getPosition()
    home_pan, home_tilt = home[0], home[1]

    # Glance somewhere other than the stored pose for a second.
    robot.move(-0.3, -0.3)
    time.sleep(1)

    # Return to the stored pose.
    robot.move(home_pan, home_tilt)
    time.sleep(1)
def main():
    """React to the user's head yaw: glance right/left, then return to the stored pose.

    Loop: detect faces, estimate the head pose of the first face, and if the
    yaw exceeds a threshold, move the robot to look that way for a while
    before returning to the pan/tilt it had when the glance started.
    """
    # We need to initialize the ROS environment for the robot and camera
    # to connect/communicate.
    ROSEnvironment()

    # Initialize and start the camera.
    camera = Camera()
    camera.start()

    # Initialize the face detector and the 68-landmark shape predictor.
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Initialize and start the robot, then move to the starting pose.
    robot = Robot()
    robot.start()
    robot.move(0, 0.5)

    # Main processing loop.
    while True:
        # Grab the current camera frame.
        img = camera.getImage()

        # Detect faces in the frame.
        dets = face_detector.detect(img)

        # Draw a box around every detected face.
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()), (det.right(), det.bottom()), color_green, 3)

        # We only use the first face to estimate pose.
        if len(dets) > 0:
            # Estimate the head pose of the first detection.
            (success, rotation_vector, translation_vector, image_points) = face_detector.estimate_pose(img, dets[0])

            # Overlay the pose axes on the image.
            img = face_detector.draw_pose(img, rotation_vector, translation_vector, image_points)

            # FIX: 'print rotation_vector' was Python-2-only statement syntax;
            # the single-argument call form prints identically on both 2 and 3.
            print(rotation_vector)
            # Yaw is taken from index 2 of the rotation vector.
            yaw = rotation_vector[2]

            # FIX: hoist the repeated robot.getPosition() calls (was called
            # four times per frame) into one snapshot of the current pose.
            current_position = robot.getPosition()
            current_pan = current_position[0]
            current_tilt = current_position[1]
            print("Pan angle is ", current_pan, "Tilt angle is", current_tilt)

            # Condition for the user looking to the right.
            if yaw > 0.3:
                print('You are looking at right.')
                # Motion for looking at right.
                robot.move(0.5, 0.5)
            # Condition for the user looking to the left.
            elif yaw < -0.3:
                print('You are looking at left.')
                # Motion for looking at left.
                robot.move(-0.5, 0.5)

            # Hold the glance, then return to the stored pose.
            time.sleep(3)
            robot.move(current_pan, current_tilt)
            time.sleep(5)

        # Show the image (BGR -> RGB via the reversed channel slice).
        cv2.imshow("Frame", img[..., ::-1])

        # Close if any key is pressed.
        key = cv2.waitKey(1)
        if key > 0:
            break
def main():
    """Timed head-yaw reaction: glance in the direction the user looks, return after 3 s.

    Like the simpler version, but non-blocking: instead of sleeping through the
    glance, a timestamp (``motion_start_time``) records when the glance motion
    started, and each loop iteration checks whether 3 seconds have elapsed
    before moving back to the stored pose.
    """
    # We need to initialize the ROS environment for the robot and camera
    # to connect/communicate.
    ROSEnvironment()

    # Initialize and start the camera.
    camera = Camera()
    camera.start()

    # Initialize the face detector and the 68-landmark shape predictor.
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Initialize and start the robot, then move to the starting pose.
    robot = Robot()
    robot.start()
    robot.move(0, 0.3)

    # The time when the glance motion started; None while no glance is active.
    motion_start_time = None

    # Remembered pose to return to after a glance; only read while a glance
    # is active (guarded by motion_start_time), so these are always set first.
    current_pan = 0
    current_tilt = 0

    # Main processing loop.
    while True:
        # Grab the current camera frame.
        img = camera.getImage()

        # Detect faces in the frame.
        dets = face_detector.detect(img)

        # Draw a box around every detected face.
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()), (det.right(), det.bottom()), color_green, 3)

        # We only use the first face to estimate pose.
        if len(dets) > 0:
            # Estimate the head pose of the first detection.
            (success, rotation_vector, translation_vector, image_points) = face_detector.estimate_pose(img, dets[0])

            # Overlay the pose axes on the image.
            img = face_detector.draw_pose(img, rotation_vector, translation_vector, image_points)

            # The yaw value is taken from index 2 of the rotation vector.
            # FIX: 'print rotation_vector' was Python-2-only statement syntax.
            print(rotation_vector)
            yaw = rotation_vector[2]

            # Print the current position.
            print("Pan angle is ", robot.getPosition()[0], "Tilt angle is", robot.getPosition()[1])

            # Condition when the user is looking right (and no glance active).
            if yaw > 0.3 and motion_start_time is None:
                print('You are looking at right.')
                # FIX: was an unfinished TODO stub that did not parse;
                # store the current position in current_pan / current_tilt.
                current_pos = robot.getPosition()
                current_pan = current_pos[0]
                current_tilt = current_pos[1]
                # FIX: was 'robot.move(,)'; motion for looking right.
                robot.move(0.5, 0.3)
                motion_start_time = current_time()
            # Condition when the user is looking left (and no glance active).
            elif yaw < -0.3 and motion_start_time is None:
                print('You are looking at left.')
                # FIX: was an unfinished TODO stub that did not parse;
                # store the current position in current_pan / current_tilt.
                current_pos = robot.getPosition()
                current_pan = current_pos[0]
                current_tilt = current_pos[1]
                # FIX: was 'robot.move(,)'; motion for looking left.
                robot.move(-0.5, 0.3)
                motion_start_time = current_time()

            # While a glance is active, print how long it has been running.
            if motion_start_time is not None:
                print(current_time() - motion_start_time)

        # After the motion runs, check if 3 seconds have passed.
        if motion_start_time is not None and current_time() - motion_start_time > 3:
            # FIX: was 'robot.move(, )'; return to the stored pose.
            robot.move(current_pan, current_tilt)
            motion_start_time = None

        sleep(0.05)

        # Show the image (BGR -> RGB via the reversed channel slice).
        cv2.imshow("Frame", img[..., ::-1])

        # Close if any key is pressed.
        key = cv2.waitKey(1)
        if key > 0:
            break
def main():
    """Track a face until it is centered, then react to head yaw with a timed glance.

    State machine per frame:
      * While the face has been centered fewer than 5 counts (and no glance is
        active), track it with ``robot.lookatpoint``.
      * Once centered long enough, stop tracking and estimate head pose; a
        strong yaw triggers a glance motion whose start time is remembered.
      * 3 seconds after a glance starts, return to the stored pan/tilt.
    """
    faceInCenter_count = 0
    current_pan = 0
    current_tilt = 0

    # We need to initialize the ROS environment for the robot and camera
    # to connect/communicate.
    ROSEnvironment()

    # Initialize and start the camera.
    camera = Camera()
    camera.start()

    # Initialize and start the robot.
    robot = Robot()
    robot.start()

    # Initialize the face detector and the 68-landmark shape predictor.
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Face detection result.
    dets = None
    # Current tracking state.
    Tracking = False
    # The time when the look-left/right motion started; None while inactive.
    motion_start_time = None
    cnt = 0

    # Main loop.
    while True:
        # Grab the current camera frame; drop frames the helper tells us to skip.
        img = camera.getImage()
        if frame_skip(img):
            continue

        # Detect faces.
        dets = face_detector.detect(img)

        # If at least one face was detected...
        if len(dets) > 0:
            # ...we select the first face detected.
            tracked_face = dets[0]

            # Center of the face bounding box.
            # FIX: unified naming (was tracked_face_X / tracked_face_y).
            tracked_face_x = (tracked_face.left() + tracked_face.right()) / 2
            tracked_face_y = (tracked_face.top() + tracked_face.bottom()) / 2

            # Estimate head pose.
            (success, rotation_vector, translation_vector, image_points) = face_detector.estimate_pose(img, tracked_face)

            # Draw bounding box and head pose overlay.
            cv2.rectangle(img, (tracked_face.left(), tracked_face.top()), (tracked_face.right(), tracked_face.bottom()), color_green, 3)
            img = face_detector.draw_pose(img, rotation_vector, translation_vector, image_points)

            # Check if the head is in the center; returns how many times the
            # head was in the center so far.
            faceInCenter_count = faceInCenter(camera, tracked_face_x, tracked_face_y, faceInCenter_count)
            # FIX: 'print x' was Python-2-only statement syntax.
            print(faceInCenter_count)
            # FIX: log typo — was "Face as been".
            print("{} in the center for {} times".format("Face has been", faceInCenter_count))

            # Keep tracking until the head has been centered 5 counts and no
            # glance motion is active.
            if faceInCenter_count < 5 and motion_start_time is None:
                Tracking = True
            else:
                Tracking = False

            if Tracking:
                # Center the robot's gaze on the face.
                print("Tracking the Person")
                # Convert 2-D image coordinates to 3-D camera-axis coordinates,
                # then to robot-axis coordinates, and look at that point.
                (x, y, z) = camera.convert2d_3d(tracked_face_x, tracked_face_y)
                (x, y, z) = camera.convert3d_3d(x, y, z)
                robot.lookatpoint(x, y, z, 1)
            else:
                # FIX: was 'elif Tracking is False' — Tracking is assigned a
                # bool just above, so a plain else is equivalent and clearer.
                print("Stopped Tracking, Starting Head Pose Estimation")

                # Yaw is the angle of the face on the z-axis (index 2).
                yaw = rotation_vector[2]

                # Condition for the user looking towards the right.
                if yaw > 0.3 and motion_start_time is None:
                    print('You are looking towards the right.')
                    # Remember the current position.
                    current_position = robot.getPosition()
                    current_pan = current_position[0]
                    current_tilt = current_position[1]
                    print("Starting head motion to look right")
                    # Motion for looking right.
                    robot.move(0.8, 0.5)
                    motion_start_time = current_time()
                # Condition for the user looking towards the left.
                elif yaw < -0.3 and motion_start_time is None:
                    print('You are looking towards the left.')
                    # Remember the current position.
                    current_position = robot.getPosition()
                    current_pan = current_position[0]
                    current_tilt = current_position[1]
                    print("Starting head motion to look left")
                    # Motion for looking left.
                    robot.move(-0.8, 0.5)
                    motion_start_time = current_time()

            # When head motion is activated we report the elapsed time.
            if motion_start_time is not None:
                # FIX: log typo — was "its been".
                print("{} and it's been {} seconds".format("Look motion activated ", (current_time() - motion_start_time)))

            # After 3 seconds, return to the stored position.
            if motion_start_time is not None and (current_time() - motion_start_time) > 3:
                # Looking at the position that is stored.
                print("Robot is going back ")
                robot.move(current_pan, current_tilt)
                motion_start_time = None
                #Tracking = True

            # Start tracking again.
            if cnt > 10 and motion_start_time is None:
                Tracking = True
            cnt = cnt + 1

        sleep(0.08)

        # Show the image (BGR -> RGB via the reversed channel slice).
        cv2.imshow("Frame", img[..., ::-1])

        # Close if any key is pressed.
        key = cv2.waitKey(1)
        if key > 0:
            break