def getPos(sub):
    # Query the robot's pose in the world frame through the
    # /gazebo/get_model_state service proxy passed in as `sub`.
    # (GetModelState is a service type, so there is no need to build a
    # message buffer; the proxy takes the name arguments directly.)
    val = sub('dd_robot', 'world')
    x_dist = val.pose.position.x
    y_dist = val.pose.position.y
    # When the robot crosses a +-20 m boundary, spawn a five-box wall at
    # +-25 m, centred on the robot's current position along that boundary.
    if x_dist < -20:
        for offset in range(-2, 3):
            spawnBox(-25, math.floor(y_dist) + offset)
    if x_dist > 20:
        for offset in range(-2, 3):
            spawnBox(25, math.floor(y_dist) + offset)
    if y_dist < -20:
        for offset in range(-2, 3):
            spawnBox(math.floor(x_dist) + offset, -25)
    if y_dist > 20:
        for offset in range(-2, 3):
            spawnBox(math.floor(x_dist) + offset, 25)
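# spawnBox is defined elsewhere in the package. The helper below is only a
# minimal sketch of what it might look like, assuming the standard Gazebo
# /gazebo/spawn_sdf_model service and a hard-coded static 1 m box SDF; the
# model naming scheme and box size are assumptions, not the project's code.
import rospy
from gazebo_msgs.srv import SpawnModel
from geometry_msgs.msg import Pose

BOX_SDF = """<sdf version="1.6">
  <model name="box">
    <static>true</static>
    <link name="link">
      <collision name="collision">
        <geometry><box><size>1 1 1</size></box></geometry>
      </collision>
      <visual name="visual">
        <geometry><box><size>1 1 1</size></box></geometry>
      </visual>
    </link>
  </model>
</sdf>"""

spawned_boxes = 0

def spawnBox_sketch(x, y):
    # Spawn a static unit box at (x, y), resting on the ground plane.
    global spawned_boxes
    rospy.wait_for_service('/gazebo/spawn_sdf_model')
    spawn = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
    pose = Pose()
    pose.position.x = x
    pose.position.y = y
    pose.position.z = 0.5
    spawn('wall_box_%d' % spawned_boxes, BOX_SDF, '', pose, 'world')
    spawned_boxes += 1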
from geometry_msgs.msg import Point32, Pose, Twist
from gp_abstract_sim.msg import path_points
from nav_msgs.msg import Odometry
from std_msgs.msg import Float32, String
from tf.transformations import euler_from_quaternion
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import GetModelState, GetModelStateRequest
from matplotlib import pyplot as plt

###########################################################################
# Global state shared between the node's callbacks.
sensor = None
isdetected = False
speed = Twist()
state_msg = ModelState()
# A request buffer, not the service class: GetModelState is a service type
# and should not be instantiated as if it were a message.
gestate = GetModelStateRequest()
new_data = Odometry()
currentPath = path_points()
counter = 0
current_x = 0.0
current_y = 0.0
current_yaw = 0.0
roll = 0.0
pitch = 0.0
flagdamn = False
flag = False
gx = 0.0
gy = 0.0
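# Sketch (an assumption, not this node's actual wiring): how the pose globals
# above are typically filled from odometry, using the imported
# euler_from_quaternion; the '/odom' topic and callback name are made up.
import rospy

def example_odom_callback(msg):
    # Cache the latest planar pose and orientation from an Odometry message.
    global current_x, current_y, current_yaw, roll, pitch
    current_x = msg.pose.pose.position.x
    current_y = msg.pose.pose.position.y
    q = msg.pose.pose.orientation
    roll, pitch, current_yaw = euler_from_quaternion([q.x, q.y, q.z, q.w])

# rospy.init_node('gp_abstract_sim_node')                      # assumed name
# rospy.Subscriber('/odom', Odometry, example_odom_callback)   # assumed topic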
def get_model_pose(self, model_name):
    # GetModelState is a service type, so it must be called through a
    # service proxy rather than constructed directly.
    try:
        get_state = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
        ms = get_state(model_name, '')
    except rospy.ServiceException:
        return None
    if ms.success:
        return ms.pose
    return None
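# Example use of get_model_pose (illustrative; assumes the node has already
# initialised and that a model named 'dd_robot', as used elsewhere in this
# package, exists in the running Gazebo world):
#
#   rospy.wait_for_service('/gazebo/get_model_state')
#   pose = self.get_model_pose('dd_robot')
#   if pose is not None:
#       rospy.loginfo("robot at (%.2f, %.2f)", pose.position.x, pose.position.y)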
def cords_callback(self, data):
    if self.counter < self.sampleSize:
        # Print sampling progress every 5% (float maths, otherwise integer
        # division reports 0% until the very end).
        if self.counter % (self.sampleSize / 20) == 0:
            print "%.1f%% done sampling" % (100.0 * self.counter / self.sampleSize)
        self.counter = self.counter + 1
        # Incoming coordinates are in millimetres; convert to metres.
        target_pose = geometry_msgs.msg.PoseStamped()
        target_pose.pose.position.x = data.x_p2 / 1000.0
        target_pose.pose.position.y = data.y_p2 / 1000.0
        target_pose.pose.position.z = data.z_p2 / 1000.0
        target_pose.header.frame_id = self.cameraNameFrame
        target_pose.header.stamp = rospy.Time.now()
        try:
            camera_transforms = self.tfBuffer.lookup_transform(self.rootFrame, self.cameraNameFrame, rospy.Time(0))
            target_transformed_pose = tf2_geometry_msgs.do_transform_pose(target_pose, camera_transforms)
            self.poses.append(target_transformed_pose.pose.position)
            self.x.append(target_transformed_pose.pose.position.x)
            self.y.append(target_transformed_pose.pose.position.y)
            self.z.append(target_transformed_pose.pose.position.z)
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
            # Log and drop this sample; do not call rospy.spin() here, it
            # would block the callback forever.
            rospy.loginfo("Failed lookup")
            return
    else:
        if not self.listedInfo:
            # Show the output once, after all samples have been collected.
            self.listedInfo = True
            # Get the actual face position from the Gazebo model-state service.
            try:
                service = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
                req = GetModelState._request_class()
                req.model_name = 'human'
                req.relative_entity_name = ''
                resp = service(req)
            except rospy.ServiceException as e:
                print("Service call failed: %s" % e)
                return
            print(resp.pose.position)
            poseArray = np.asarray(self.poses)
            # Errors relative to ground truth, back in millimetres.
            xArray = (np.asarray(self.x) - resp.pose.position.x) * 1000.0
            yArray = (np.asarray(self.y) - resp.pose.position.y) * 1000.0
            zArray = (np.asarray(self.z) - resp.pose.position.z) * 1000.0
            # 95% confidence interval of the mean, per axis.
            meanX, sigmaX = np.mean(xArray), np.std(xArray)
            conf_intX = stats.norm.interval(0.95, loc=meanX, scale=sigmaX / np.sqrt(len(xArray)))
            conf_intX = conf_intX[1] - conf_intX[0]
            meanY, sigmaY = np.mean(yArray), np.std(yArray)
            conf_intY = stats.norm.interval(0.95, loc=meanY, scale=sigmaY / np.sqrt(len(yArray)))
            conf_intY = conf_intY[1] - conf_intY[0]
            meanZ, sigmaZ = np.mean(zArray), np.std(zArray)
            conf_intZ = stats.norm.interval(0.95, loc=meanZ, scale=sigmaZ / np.sqrt(len(zArray)))
            conf_intZ = conf_intZ[1] - conf_intZ[0]
            print "X: %.1f +- %.1f, std.dev: %.1f" % (meanX, abs(conf_intX), sigmaX)
            print "Y: %.1f +- %.1f, std.dev: %.1f" % (meanY, abs(conf_intY), sigmaY)
            print "Z: %.1f +- %.1f, std.dev: %.1f" % (meanZ, abs(conf_intZ), sigmaZ)
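# Standalone sketch of the same 95% confidence-interval-of-the-mean
# computation used above, run on synthetic data (illustration only, not part
# of the original node; np and stats are the module's numpy/scipy imports):
def _ci_demo():
    demo = np.random.normal(loc=2.0, scale=5.0, size=400)  # fake errors in mm
    mean, sigma = np.mean(demo), np.std(demo)
    lo, hi = stats.norm.interval(0.95, loc=mean, scale=sigma / np.sqrt(len(demo)))
    print "mean: %.1f mm, 95%% CI half-width: %.1f mm" % (mean, (hi - lo) / 2.0)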
def runTest(og_pose, lights):
    ## face detection
    # Angles are stored as centiradians and divided by `scaling` to get
    # radians: 90/100 rad is roughly 50 degrees, 80/100 rad roughly 45.
    recline_max = 90       # ~50 degrees
    recline_min = 0
    recline_step = 9
    sideways_min = -80     # ~-45 degrees
    sideways_max = 80      # ~45 degrees
    sideways_step = 16
    scaling = 100.0
    rad2deg = 180.0 / 3.14
    sleepTime = 1

    # Debug toggle: only move the model for picture taking, then bail out.
    if False:
        theta_ud = -recline_max / scaling
        theta_lr = sideways_max / scaling
        moveModel('human', og_pose.position, -recline_max / scaling, 0.0, sideways_min / scaling)
        return

    ## landmark detection
    rootFrame = 'world'
    cameraNameFrame = 'color_corrected_frame'
    tfBuffer = tf2_ros.Buffer()
    # Keep the listener alive so tfBuffer actually gets populated.
    tf_listener = tf2_ros.TransformListener(tfBuffer)
    x = []
    y = []
    z = []

    global images_processed
    # Wait until the images_processed topic has been received once.
    while images_processed < 0:
        rospy.sleep(1)
    start_images_processed = images_processed
    faces_detected = 0

    # Loop through the lighting settings.
    dataArrayArray = []
    for i in range(0, len(lights)):
        chooseLight(lights, i)
        print "lighting setting %d" % i
        dataArray = []
        for up in range(recline_min, recline_max, recline_step):
            # Theta for the up/down angle.
            theta_ud = -up / scaling
            data = []  # per-angle detection counts, saved for graphing
            for lr in range(sideways_min, sideways_max, sideways_step):
                # Empty the face_coordinates list in place.
                face_coordinates[:] = []
                # Theta for the left/right angle.
                theta_lr = lr / scaling
                # Move the model.
                moveModel('human', og_pose.position, theta_ud, 0.0, theta_lr)
                # Give time to detect the face.
                startTime = cv2.getTickCount()
                while (cv2.getTickCount() - startTime) / cv2.getTickFrequency() < sleepTime:
                    rospy.sleep(0.001)
                # Once we have slept, remove noise. Filter into a new list
                # rather than popping while iterating, which skips elements.
                face_coordinates[:] = [p for p in face_coordinates if p.z_p1 <= 1000]
                faces_detected = faces_detected + len(face_coordinates)
                # Save the number of face detections at this pose.
                data.append(len(face_coordinates))
                print "faces detected in this pos: %d" % len(face_coordinates)
                # Calculate the detected faces' positions relative to the world.
                for faces in face_coordinates:
                    target_pose = PoseStamped()
                    target_pose.pose.position.x = faces.x_p2 / 1000.0
                    target_pose.pose.position.y = faces.y_p2 / 1000.0
                    target_pose.pose.position.z = faces.z_p2 / 1000.0
                    target_pose.header.frame_id = cameraNameFrame
                    target_pose.header.stamp = rospy.Time.now()
                    try:
                        camera_transforms = tfBuffer.lookup_transform(rootFrame, cameraNameFrame, rospy.Time(0))
                        target_transformed_pose = tf2_geometry_msgs.do_transform_pose(target_pose, camera_transforms)
                        x.append(target_transformed_pose.pose.position.x)
                        y.append(target_transformed_pose.pose.position.y)
                        z.append(target_transformed_pose.pose.position.z)
                    except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
                        # Log and abort; rospy.spin() here would block forever.
                        rospy.loginfo("Failed lookup")
                        return
            dataArray.append(data)
        dataArrayArray.append(dataArray)

    end_images_processed = images_processed
    # Calculate the final detection rate on a per-frame basis.
    total_images_processed = end_images_processed - start_images_processed
    detection_rate = float(faces_detected) / float(total_images_processed)
    detection_rate = round(detection_rate * 100.0, 2)
    print ""
    print "images processed: %d, detected faces: %d, face detection rate: %.2f%%" % (total_images_processed, faces_detected, detection_rate)

    ## landmark detection results
    try:
        service = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
        req = GetModelState._request_class()
        req.model_name = 'human'
        req.relative_entity_name = ''
        resp = service(req)
    except rospy.ServiceException as e:
print("Service call failed: %s" %e)