def _publish(self, *args):
    h = Header()
    h.stamp = rospy.Time.now()
    self.pub.publish(h)
def callback(transform):

    rospy.loginfo(rospy.get_name() + "\nI'm doing ik\n")

    # receive desired end effector positions in mm + theta
    X = transform.translation.x
    Y = transform.translation.y
    Z = transform.translation.z
    Theta = transform.rotation.w
    print transform.translation
    print transform.rotation
    # kinematic constants in mm
    E1, E2, RB, B2, A, B, D = 29.239, 29.239, 55.48, 34.1675, 111.76, 53.98, 8.255
    J = []  # motor positions in radians
    counter = 0
    for m in range(0, 4):
        if m == 0:
            xg = -Y - E1 * (math.sin(Theta) - math.cos(Theta)) + E2
            yg = X + E1 * (math.cos(Theta) + math.sin(Theta)) - B2
            zg = Z
        elif m == 1:
            xg = X + E1 * (math.cos(Theta) + math.sin(Theta)) + E2
            yg = Y + E1 * (math.sin(Theta) - math.cos(Theta)) + B2
            zg = Z
        elif m == 2:
            xg = Y - E1 * (math.sin(Theta) - math.cos(Theta)) + E2
            yg = -X + E1 * (math.cos(Theta) + math.sin(Theta)) - B2
            zg = Z
        elif m == 3:
            xg = -X + E1 * (math.cos(Theta) + math.sin(Theta)) + E2
            yg = -Y + E1 * (math.sin(Theta) - math.cos(Theta)) + B2
            zg = Z

        print "xg " + repr(xg) + " yg " + repr(yg) + " zg " + repr(zg)

        temp = math.pow((A - 2 * D) * (A - 2 * D) - yg * yg, 0.5)
        aPrimeSquared = (temp + 2 * D) * (temp + 2 * D)
        c = math.pow((xg - RB) * (xg - RB) + zg * zg, 0.5)
        #print (-aPrimeSquared+B*B+c*c)/(2*B*c)

        try:
            alpha = math.acos((-aPrimeSquared + B * B + c * c) / (2 * B * c))
            print "worked " + repr(
                (-aPrimeSquared + B * B + c * c) / (2 * B * c))
            counter = counter + 1
        except ValueError:
            print "acos domain error " + repr(
                (-aPrimeSquared + B * B + c * c) / (2 * B * c))
            break
        else:
            beta = math.atan2(zg, xg - RB)
            J.append(beta - alpha)
    print "J:"
    for j in J:
        print " " + repr(j)  # publish sensor_msgs/JointState to cmd_pos topic

    if counter == 4:
        # publish the 4 motor positions in radians
        motors = ("1_A", "1_B", "2_A", "2_B")
        velocity = [0, 0, 0, 0]
        effort = [0, 0, 0, 0]
        # NOTE: constructing the publisher once at node startup is preferable;
        # messages published right after construction may be dropped.
        pub = rospy.Publisher('cmd_pos', JointState, queue_size=10)
        header = Header(0, rospy.get_rostime(), "")
        jointState = JointState(header=header,
                                name=motors,
                                position=J,
                                velocity=velocity,
                                effort=effort)
        pub.publish(jointState)
        # NOTE: 'cup_ms' is assumed to be a GetModelState service proxy and
        # 'pub_cup_pose' a PoseStamped publisher, both defined elsewhere.
        try:
            resp_cup_ms = cup_ms("cup", "")
            pose_cup = resp_cup_ms.pose
            header_cup = resp_cup_ms.header
            header_cup.frame_id = frameid_var
            poseStamped_cup = PoseStamped(header=header_cup, pose=pose_cup)
            pub_cup_pose.publish(poseStamped_cup)
        except rospy.ServiceException, e:
            rospy.logerr(
                "get_model_state for cup service call failed: {0}".format(e))

###################################################################################################
###  START: Gripper stuff

        pose_lglf = None
        pose_lgrf = None
        hdr = Header(frame_id=frameid_var)

        try:
            lglf_link_state = rospy.ServiceProxy('/gazebo/get_link_state',
                                                 GetLinkState)
            resp_lglf_link_state = lglf_link_state('l_gripper_l_finger',
                                                   'world')
            # lglf_reference = resp_lglf_link_state.link_state.reference_frame
            pose_lglf = resp_lglf_link_state.link_state.pose
        except rospy.ServiceException, e:
            rospy.logerr(
                "get_link_state for l_gripper_l_finger: {0}".format(e))
        try:
            lgrf_link_state = rospy.ServiceProxy('/gazebo/get_link_state',
                                                 GetLinkState)
            resp_lgrf_link_state = lgrf_link_state('l_gripper_r_finger',
                                                   'world')
            pose_lgrf = resp_lgrf_link_state.link_state.pose
        except rospy.ServiceException, e:
            rospy.logerr(
                "get_link_state for l_gripper_r_finger: {0}".format(e))
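Both finger lookups above recreate the same /gazebo/get_link_state proxy. A minimal refactor (a sketch; 'finger_pose' is a hypothetical helper, GetLinkState is the service type already used above) creates the proxy once and reuses it:

rospy.wait_for_service('/gazebo/get_link_state')
get_link_state = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)

def finger_pose(link_name, reference='world'):
    # Return the link's pose, or None if the service call fails.
    try:
        return get_link_state(link_name, reference).link_state.pose
    except rospy.ServiceException as e:
        rospy.logerr("get_link_state for %s: %s", link_name, e)
        return None

pose_lglf = finger_pose('l_gripper_l_finger')
pose_lgrf = finger_pose('l_gripper_r_finger')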
Example #4
#!/usr/bin/env python

import rospy
from nav_msgs.msg import Odometry
from std_msgs.msg import Header
from gazebo_msgs.srv import GetModelState, GetModelStateRequest

rospy.init_node('odom_pub')

odom_pub = rospy.Publisher('/my_odom', Odometry, queue_size=1)

rospy.wait_for_service('/gazebo/get_model_state')
get_model_srv = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)

odom = Odometry()  #instance of message type Odometry
header = Header()
header.frame_id = '/odom'

model = GetModelStateRequest()
model.model_name = 'testrig'

rate = rospy.Rate(10)

while not rospy.is_shutdown():

    result = get_model_srv(model)  #odom info in gazebo

    odom.pose.pose = result.pose
    odom.twist.twist = result.twist

    header.stamp = rospy.Time.now()
    odom.header = header

    odom_pub.publish(odom)
    rate.sleep()
Example #5
    def scan_received(self, msg):
        """ Returns an occupancy grid to publish data to map"""
        if len(msg.ranges) != 360:
            return

        #make a pose stamp that relates to the odom of the robot
        p = PoseStamped(header=Header(stamp=msg.header.stamp,
                                      frame_id="base_link"),
                        pose=Pose())
        self.odom_pose = self.tf_listener.transformPose("odom", p)
        # convert the odom pose to the tuple (x,y,theta)
        self.odom_pose = TransformHelpers.convert_pose_to_xy_and_theta(
            self.odom_pose.pose)

        time_past = 0

        for degree in range(360):
            if msg.ranges[degree] > 0.0 and msg.ranges[degree] < 5.0:
                #gets the position of the laser data points
                data_x = self.odom_pose[0] + msg.ranges[degree] * math.cos(
                    degree * math.pi / 180.0 + self.odom_pose[2])
                data_y = self.odom_pose[1] + msg.ranges[degree] * math.sin(
                    degree * math.pi / 180 + self.odom_pose[2])

                #maps laser data to a pixel in the map
                datax_pixel = int((data_x - self.origin[0]) / self.resolution)
                datay_pixel = int((data_y - self.origin[1]) / self.resolution)

                #vector from the robot to the laser point
                robot = (data_x - self.odom_pose[0],
                         data_y - self.odom_pose[1])

                #finds how far away the point is from the robot
                magnitude = math.sqrt(robot[0]**2 + robot[1]**2)

                #converts magnitude and robot position to pixels in the map
                n_steps = max([1, int(math.ceil(magnitude / self.resolution))])
                robot_step = (robot[0] / max(n_steps - 1, 1),
                              robot[1] / max(n_steps - 1, 1))
                marked = set()

                for pixel in range(n_steps):
                    curr_x = self.odom_pose[0] + pixel * robot_step[0]
                    curr_y = self.odom_pose[1] + pixel * robot_step[1]
                    if not self.is_in_map(curr_x, curr_y):
                        #stop once the ray leaves the map
                        break

                    x_ind = int((curr_x - self.origin[0]) / self.resolution)
                    y_ind = int((curr_y - self.origin[1]) / self.resolution)
                    if x_ind == datax_pixel and y_ind == datay_pixel and self.odds_ratios[
                            datax_pixel, datay_pixel] >= 1 / 60.0:
                        #set odds ratio equal to past odds ratio
                        if time_past % 5 == 0:
                            self.past_odds_ratios[
                                datax_pixel,
                                datay_pixel] = self.odds_ratios[datax_pixel,
                                                                datay_pixel]
                            time_past += 1
                        self.odds_ratios[
                            datax_pixel, datay_pixel] *= self.p_occ[
                                datax_pixel, datay_pixel] / (
                                    1 - self.p_occ[datax_pixel, datay_pixel]
                                ) * self.odds_ratio_hit
                    if not ((x_ind, y_ind) in marked
                            ) and self.odds_ratios[x_ind, y_ind] >= 1 / 60.0:
                        #If point isn't marked, update the odds of missing and add to the map
                        if time_past % 5 == 0:
                            self.past_odds_ratios[
                                x_ind, y_ind] = self.odds_ratios[x_ind, y_ind]
                            time_past += 1
                        self.odds_ratios[x_ind,
                                         y_ind] *= self.p_occ[x_ind, y_ind] / (
                                             1 - self.p_occ[x_ind, y_ind]
                                         ) * self.odds_ratio_miss
                        #self.p_occ[x_ind, y_ind] *= self.p_occ[x_ind, y_ind] * self.odds_ratio_miss/self.odds_ratio_hit
                        marked.add((x_ind, y_ind))
                        #print 'New Point'
                if not (self.is_in_map(data_x, data_y)) and self.odds_ratios[
                        datax_pixel, datay_pixel] >= 1 / 60.0:
                    #if it is not in the map, update the odds of hitting it
                    if time_past % 5 == 0:
                        self.past_odds_ratios[datax_pixel,
                                              datay_pixel] = self.odds_ratios[
                                                  datax_pixel, datay_pixel]
                        time_past += 1
                    self.odds_ratios[datax_pixel, datay_pixel] *= self.p_occ[
                        datax_pixel, datay_pixel] / (1 - self.p_occ[
                            datax_pixel, datay_pixel]) * self.odds_ratio_hit

        self.seq += 1
        if self.seq % 10 == 0:
            map = OccupancyGrid()  #this is a nav msg class
            map.header.seq = self.seq
            map.header.stamp = msg.header.stamp
            map.header.frame_id = "map"
            map.info.origin.position.x = self.origin[0]
            map.info.origin.position.y = self.origin[1]
            map.info.width = self.n
            map.info.height = self.n
            map.info.resolution = self.resolution
            map.data = [0] * self.n**2  #flat row-major grid, initialized to 0
            for i in range(self.n):
                #this is giving us the i,j grid square, occupancy grid
                for j in range(self.n):
                    idx = i + self.n * j  #makes horizontal rows (i is x, j is y)
                    if self.odds_ratios[i, j] < 1 / 5.0:
                        map.data[idx] = 0  #makes the gray
                    elif 1 / 5.0 <= self.odds_ratios[i, j] < 0.5:
                        map.data[idx] = 25
                    elif self.odds_ratios[i, j] > 0.5:
                        map.data[idx] = 100  #makes the black walls
                    else:
                        map.data[idx] = -1  #makes unknown
            self.pub.publish(map)
            #TODO: Change display such that we're not just looking at the ratio, but mapping the dynamic archive and current readings

        image1 = np.zeros(
            (self.odds_ratios.shape[0], self.odds_ratios.shape[1], 3))
        image2 = np.zeros(
            (self.odds_ratios.shape[0], self.odds_ratios.shape[1], 3))

        self.counter += 1

        x_odom_index = int(
            (self.odom_pose[0] - self.origin[0]) / self.resolution)
        y_odom_index = int(
            (self.odom_pose[1] - self.origin[1]) / self.resolution)

        self.pose.append((x_odom_index, y_odom_index))

        #.shape comes from the underlying numpy array
        for i in range(image1.shape[0]):
            for j in range(image1.shape[1]):
                #print self.past_odds_ratios[i,j]
                #print self.odds_ratios[i,j]
                #the thing that just rapidly appeared, disappeared!
                delta = (self.odds_ratios[i, j] - self.past_odds_ratios[i, j])
                if (delta < 0.0) and (i, j) in self.rapid_appear:
                    self.dyn_obs.append((i, j, self.counter))

                #whoa buddy, a thing just appeared
                if delta > 1000.0 and (i, j) not in self.rapid_appear:
                    self.rapid_appear.add((i, j))

                if self.odds_ratios[i, j] < 1 / 50.0:
                    image1[i, j, :] = 1.0  #makes open space
                elif self.odds_ratios[i, j] >= 1 / 50.0 and self.odds_ratios[
                        i, j] < 4 / 5.0:
                    image1[i, j, :] = (0, 255, 0)
                elif self.odds_ratios[i, j] >= 4 / 5.0:
                    image1[i, j, :] = (0, 0, 255)  #makes walls
                else:
                    image1[i, j, :] = 0.5  #not read

        if len(self.dyn_obs) > 0:
            for point in self.dyn_obs:
                if (self.counter - point[2]) <= 100:
                    image2[point[0], point[1]] = (
                        255, 0, 255)  #makes old/dynamic shapes on other map
            graph = image.img_to_graph(image1)
            beta = 5
            eps = 1e-6
            graph.data = np.exp(-beta * graph.data / image1.std()) + eps

            N_REGIONS = 5

            for assign_labels in ('kmeans', 'discretize'):
                t0 = time.time()
                labels = spectral_clustering(graph,
                                             n_clusters=N_REGIONS,
                                             assign_labels=assign_labels,
                                             random_state=1)
                t1 = time.time()
                labels = labels.reshape(image1.shape[:2])  # back to the image grid

                plt.figure(figsize=(5, 5))
                plt.imshow(image1, cmap=plt.cm.gray)
                for l in range(N_REGIONS):
                    plt.contour(labels == l,
                                contours=1,
                                colors=[
                                    plt.cm.spectral(l / float(N_REGIONS)),
                                ])
                plt.xticks(())
                plt.yticks(())
                plt.title('Spectral clustering: %s, %.2fs' % (assign_labels,
                                                              (t1 - t0)))

                plt.show()

        for point in self.pose:
            image1[point[0], point[1]] = (255, 0, 0)

        #draw it!
        cv2.circle(image1, (y_odom_index, x_odom_index), 2, (255, 0, 0))
        cv2.imshow("map", cv2.resize(image1, (500, 500)))
        cv2.imshow("past_map", cv2.resize(image2, (500, 500)))
        cv2.waitKey(20)  #effectively a delay
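The three odds-ratio updates in scan_received share one multiplicative form. A consolidated helper (a sketch using the same attributes as above) makes the Bayes update explicit:

    def update_odds(self, x_ind, y_ind, ratio):
        # Multiplicative Bayes update of one cell's occupancy odds:
        # odds *= p / (1 - p) * ratio, where ratio is self.odds_ratio_hit
        # for a hit and self.odds_ratio_miss for a miss.
        p = self.p_occ[x_ind, y_ind]
        self.odds_ratios[x_ind, y_ind] *= p / (1.0 - p) * ratio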
    def left_arm_go_to_pose_goal(self):
        # Get the planning group handle; here, the left arm
        left_arm = self.left_arm

        # Get the current end-effector pose
        pose_goal = left_arm.get_current_pose().pose

        # Constrain the motion of the end-effector gripper joint
        left_joint_const = JointConstraint()
        left_joint_const.joint_name = "gripper_l_joint_r"
        if Leftfinger > -32:
            left_joint_const.position = 0.024
        else:
            left_joint_const.position = 0
        left_joint_const.weight = 1.0

        # Constrain the end-effector displacement
        left_position_const = PositionConstraint()
        left_position_const.header = Header()
        left_position_const.link_name = "gripper_l_joint_r"
        left_position_const.target_point_offset.x = 0.5
        left_position_const.target_point_offset.y = -0.5
        left_position_const.target_point_offset.z = 1.0
        left_position_const.weight = 1.0

        # Constrain rotation of joint 1
        left_joint1_const = JointConstraint()
        left_joint1_const.joint_name = "yumi_joint_1_l"
        left_joint1_const.position = 0
        left_joint1_const.tolerance_above = 1.76
        left_joint1_const.tolerance_below = 0
        left_joint1_const.weight = 1.0

        # Constrain rotation of joint 2
        left_joint2_const = JointConstraint()
        left_joint2_const.joint_name = "yumi_joint_2_l"
        left_joint2_const.position = 0
        left_joint2_const.tolerance_above = 0
        left_joint2_const.tolerance_below = 150
        left_joint2_const.weight = 1.0

        # Constrain rotation of joint 3
        left_joint3_const = JointConstraint()
        left_joint3_const.joint_name = "yumi_joint_3_l"
        left_joint3_const.position = 0
        left_joint3_const.tolerance_above = 35
        left_joint3_const.tolerance_below = 55
        left_joint3_const.weight = 1.0

        # Constrain rotation of joint 4
        left_joint4_const = JointConstraint()
        left_joint4_const.joint_name = "yumi_joint_4_l"
        left_joint4_const.position = 0
        left_joint4_const.tolerance_above = 60
        left_joint4_const.tolerance_below = 75
        left_joint4_const.weight = 1.0

        # Constrain rotation of joint 5
        left_joint5_const = JointConstraint()
        left_joint5_const.joint_name = "yumi_joint_5_l"
        left_joint5_const.position = 40
        left_joint5_const.tolerance_above = 50
        left_joint5_const.tolerance_below = 20
        left_joint5_const.weight = 1.0

        # Constrain rotation of joint 6
        left_joint6_const = JointConstraint()
        left_joint6_const.joint_name = "yumi_joint_6_l"
        left_joint6_const.position = 0
        left_joint6_const.tolerance_above = 10
        left_joint6_const.tolerance_below = 35
        left_joint6_const.weight = 1.0

        # Constrain rotation of joint 7
        left_joint7_const = JointConstraint()
        left_joint7_const.joint_name = "yumi_joint_7_l"
        left_joint7_const.position = -10
        left_joint7_const.tolerance_above = 0
        left_joint7_const.tolerance_below = 160
        left_joint7_const.weight = 1.0

        # Constrain the end-effector displacement (overrides the earlier position constraint)
        left_position_const = PositionConstraint()
        left_position_const.header = Header()
        left_position_const.link_name = "gripper_l_joint_r"
        left_position_const.target_point_offset.x = 0.5
        left_position_const.target_point_offset.y = 0.25
        left_position_const.target_point_offset.z = 0.5
        left_position_const.weight = 1.0

        # Add an end-effector orientation constraint:
        left_orientation_const = OrientationConstraint()
        left_orientation_const.header = Header()
        left_orientation_const.orientation = pose_goal.orientation
        left_orientation_const.link_name = "gripper_l_finger_r"
        left_orientation_const.absolute_x_axis_tolerance = 0.5
        left_orientation_const.absolute_y_axis_tolerance = 0.25
        left_orientation_const.absolute_z_axis_tolerance = 0.5
        left_orientation_const.weight = 1

        # Apply the constraints
        consts = Constraints()
        consts.joint_constraints = [left_joint_const]
        # consts.orientation_constraints = [left_orientation_const]
        # consts.position_constraints = [left_position_const]
        left_arm.set_path_constraints(consts)

        # Set the target pose for the planning group
        pose_goal.orientation.x = Left_Qux
        pose_goal.orientation.y = Left_Quy
        pose_goal.orientation.z = Left_Quz
        pose_goal.orientation.w = Left_Quw
        pose_goal.position.x = (Neurondata[11] - 0.05) * 1.48 + 0.053
        pose_goal.position.y = (Neurondata[9] - 0.18) * 1.48 + 0.12
        pose_goal.position.z = (Neurondata[10] - 0.53) * 1.48 + 0.47
        left_arm.set_pose_target(pose_goal)
        print "End effector pose %s" % pose_goal

        # # Set the target pose for the planning group
        # pose_goal.orientation.x = pose_goal.orientation.x
        # pose_goal.orientation.y = pose_goal.orientation.y
        # pose_goal.orientation.z = pose_goal.orientation.z
        # pose_goal.orientation.w = pose_goal.orientation.w
        # pose_goal.position.x = pose_goal.position.x
        # pose_goal.position.y = pose_goal.position.y - 0.01
        # pose_goal.position.z = pose_goal.position.z
        # left_arm.set_pose_target(pose_goal)
        # print "End effector pose %s" % pose_goal

        # Plan and execute the motion
        traj = left_arm.plan()
        left_arm.execute(traj, wait=False)
        # Clear the pose targets once the motion finishes
        left_arm.clear_pose_targets()
        # Clear the path constraints
        left_arm.clear_path_constraints()
        # Make sure no unfinished motion is still executing
        left_arm.stop()
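The seven per-joint constraint blocks above repeat the same field pattern. A small factory (a sketch; 'make_joint_constraint' is a hypothetical helper) would reduce the duplication:

    def make_joint_constraint(self, name, position, tol_above, tol_below,
                              weight=1.0):
        # Build a MoveIt JointConstraint from the repeated field pattern,
        # e.g. self.make_joint_constraint("yumi_joint_2_l", 0, 0, 150).
        jc = JointConstraint()
        jc.joint_name = name
        jc.position = position
        jc.tolerance_above = tol_above
        jc.tolerance_below = tol_below
        jc.weight = weight
        return jc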
    def callback_scan(self, data):
        try:
            transform = self.tfBuffer.lookup_transform("odom", "base_link",
                                                       rospy.Time())
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            return

        trans = transform.transform.translation
        rot = transform.transform.rotation
        euler = tf.transformations.euler_from_quaternion(
            (rot.x, rot.y, rot.z, rot.w), "szxy")
        quat = tf.transformations.quaternion_from_euler(euler[0], 0, 0, "szxy")
        transform.transform.translation.z = 0
        transform.transform.rotation = Quaternion(quat[0], quat[1], quat[2],
                                                  quat[3])

        self.tf_robot_position.sendTransform(
            (trans.x, trans.y, 0), (quat[0], quat[1], quat[2], quat[3]),
            rospy.Time.now(), 'robot_position', 'odom')

        scan = data
        projector = LaserProjection()
        cloud = projector.projectLaser(scan)
        fixed_frame_cloud = do_transform_cloud(cloud, transform)
        points = pc2.read_points(fixed_frame_cloud,
                                 field_names=['x', 'y', 'z'],
                                 skip_nans=True)
        # per-pole accumulator: [sum_x, sum_y, point_count]
        pole = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
        data = []
        for x, y, z in points:
            if x > 5 and x < 7 and y > -2 and y < 2:
                data.append([x, y, z, 0xff0000])
                pole[0][0] += x
                pole[0][1] += y
                pole[0][2] += 1
            elif x > 2 and x < 4 and y > -2 and y < 0:
                data.append([x, y, z, 0x00ff00])
                pole[1][0] += x
                pole[1][1] += y
                pole[1][2] += 1
            elif x > 2 and x < 4 and y > 0 and y < 2:
                data.append([x, y, z, 0x0000ff])
                pole[2][0] += x
                pole[2][1] += y
                pole[2][2] += 1
            else:
                data.append([x, y, z, 0xffff00])
        for p in pole:
            if p[2] != 0:
                p[0] /= p[2]
                p[1] /= p[2]

        if pole[0][2] < 5 or (pole[1][2] < 5 and pole[2][2] < 5):
            return

        trans_diff = [6.0 - pole[0][0], 0.0 - pole[0][1]]
        if pole[1][2] != 0:
            rot_diff = math.atan2(-1, -3) - math.atan2(pole[1][1] - pole[0][1],
                                                       pole[1][0] - pole[0][0])
        elif pole[2][2] != 0:
            rot_diff = math.atan2(1, -3) - math.atan2(pole[2][1] - pole[0][1],
                                                      pole[2][0] - pole[0][0])

#        data_pole = []
        for x, y, z, color in data:
            tx = x + trans_diff[0] - 6.0
            ty = y + trans_diff[1]
            rx = tx * math.cos(rot_diff) - ty * math.sin(rot_diff) + 6.0
            ry = tx * math.sin(rot_diff) + ty * math.cos(rot_diff)
            if random.random() < 0.01:
                self.data_pole.append([rx, ry, z, color])


#        print data_pole

        HEADER = Header(frame_id='/odom')
        FIELDS = [
            PointField(name='x',
                       offset=0,
                       datatype=PointField.FLOAT32,
                       count=1),
            PointField(name='y',
                       offset=4,
                       datatype=PointField.FLOAT32,
                       count=1),
            PointField(name='z',
                       offset=8,
                       datatype=PointField.FLOAT32,
                       count=1),
            PointField(name='rgb',
                       offset=12,
                       datatype=PointField.UINT32,
                       count=1),
        ]
        filtered_cloud = pc2.create_cloud(HEADER, FIELDS, self.data_pole)
        self.pc_pub.publish(filtered_cloud)
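The translate-then-rotate correction above is a 2D rigid transform about the reference pole at (6, 0). Isolated as a helper (a sketch with the same math), it reads:

    def correct_point(self, x, y, trans_diff, rot_diff, cx=6.0, cy=0.0):
        # Translate so the reference pole becomes the origin, rotate the
        # residual by rot_diff, then translate back.
        tx = x + trans_diff[0] - cx
        ty = y + trans_diff[1] - cy
        rx = tx * math.cos(rot_diff) - ty * math.sin(rot_diff) + cx
        ry = tx * math.sin(rot_diff) + ty * math.cos(rot_diff) + cy
        return rx, ry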
Example #8
    def point_cloud_callback(self, cloud_msg):
        """
        Run SqueezeSeg inference on an incoming point cloud and publish the
        depth map, the label map, and the segmented cloud.

        :param cloud_msg: sensor_msgs/PointCloud2 from the LiDAR
        :return: None
        """

        # cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
        # # copy from
        # # classify_image.py
        # image_data = cv2.imencode('.jpg', cv_image)[1].tostring()
        # # Creates graph from saved GraphDef.
        # softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
        # predictions = self._session.run(
        #     softmax_tensor, {'DecodeJpeg/contents:0': image_data})
        # predictions = np.squeeze(predictions)
        # # Creates node ID --> English string lookup.
        # node_lookup = classify_image.NodeLookup()
        # top_k = predictions.argsort()[-self.use_top_k:][::-1]
        # for node_id in top_k:
        #     human_string = node_lookup.id_to_string(node_id)
        #     score = predictions[node_id]
        #     if score > self.score_threshold:
        #         rospy.loginfo('%s (score = %.5f)' % (human_string, score))
        #         self._pub.publish(human_string)
        # read points from the incoming point cloud message
        clock = Clock()
        rospy.logwarn(
            "subscribed. width: %d, height: %u, point_step: %d, row_step: %d",
            cloud_msg.width, cloud_msg.height, cloud_msg.point_step,
            cloud_msg.row_step)

        pc = pc2.read_points(cloud_msg,
                             skip_nans=False,
                             field_names=("x", "y", "z", "intensity"))
        # convert pc into numpy.ndarray format
        np_p = np.array(list(pc))
        # perform fov filter by using hv_in_range
        cond = self.hv_in_range(x=np_p[:, 0],
                                y=np_p[:, 1],
                                z=np_p[:, 2],
                                fov=[-45, 45])
        # to rotate points according to calibrated points with velo2cam
        # np_p_ranged = np.stack((np_p[:,1],-np_p[:,2],np_p[:,0],np_p[:,3])).T
        np_p_ranged = np_p[cond]

        # get depth map
        lidar = self.pto_depth_map(velo_points=np_p_ranged, C=5)
        lidar_f = lidar.astype(np.float32)
        #normalize intensity from [0,255] to [0,1], as shown in KITTI dataset
        #dep_map[:,:,0] = (dep_map[:,:,0]-0)/np.max(dep_map[:,:,0])
        #dep_map = cv2.resize(src=dep_map,dsize=(512,64))

        # to perform prediction
        lidar_mask = np.reshape(
            (lidar[:, :, 4] > 0),
            [self._mc.ZENITH_LEVEL, self._mc.AZIMUTH_LEVEL, 1])
        lidar_f = (lidar_f - self._mc.INPUT_MEAN) / self._mc.INPUT_STD
        pred_cls = self._session.run(self._model.pred_cls,
                                     feed_dict={
                                         self._model.lidar_input: [lidar_f],
                                         self._model.keep_prob: 1.0,
                                         self._model.lidar_mask: [lidar_mask]
                                     })
        label = pred_cls[0]

        # # generated depth map from LiDAR data
        depth_map = Image.fromarray(
            (255 * _normalize(lidar[:, :, 3])).astype(np.uint8))
        # # classified depth map with label
        # label_map = Image.fromarray(
        #     (255 * visualize_seg(pred_cls, self._mc)[0]).astype(np.uint8))
        # # blending image: out = image1 * (1.0 - alpha) + image2 * alpha
        # blend_map = Image.blend(
        #     depth_map.convert('RGBA'),
        #     label_map.convert('RGBA'),
        #     alpha=0.4
        # )
        #
        # # save classified depth map image with label
        # blend_map.save(os.path.join(FLAGS.out_dir, 'plot_' + file_name + '.png'))
        label_3d = np.zeros((label.shape[0], label.shape[1], 3))
        label_3d[np.where(label == 0)] = [1., 1., 1.]
        label_3d[np.where(label == 1)] = [0., 1., 0.]
        label_3d[np.where(label == 2)] = [1., 1., 0.]
        label_3d[np.where(label == 3)] = [0., 1., 1.]

        ## point cloud in filed of view
        # x = np_p_ranged[:, 0].reshape(-1)
        # y = np_p_ranged[:, 1].reshape(-1)
        # z = np_p_ranged[:, 2].reshape(-1)
        # i = np_p_ranged[:, 3].reshape(-1)
        ## point cloud for SqueezeSeg segments
        x = lidar[:, :, 0].reshape(-1)
        y = lidar[:, :, 1].reshape(-1)
        z = lidar[:, :, 2].reshape(-1)
        i = lidar[:, :, 3].reshape(-1)
        label = label.reshape(-1)
        # cond = (label!=0)
        # print(cond)
        cloud = np.stack((x, y, z, i, label))
        # cloud = np.stack((x, y, z, i))

        label_map = Image.fromarray(
            (255 * _normalize(label_3d)).astype(np.uint8))

        header = Header()
        header.stamp = rospy.Time.now()
        header.frame_id = "velodyne"
        # feature map & label map
        msg_feature = ImageConverter.to_ros(depth_map)
        msg_feature.header = header
        msg_label = ImageConverter.to_ros(label_map)
        msg_label.header = header

        # point cloud segments
        # 4 PointFields as channel description
        msg_segment = pc2.create_cloud(header=header,
                                       fields=_make_point_field(
                                           cloud.shape[0]),
                                       points=cloud.T)

        self._feature_map_pub.publish(msg_feature)
        self._label_map_pub.publish(msg_label)
        self._pub.publish(msg_segment)
        rospy.loginfo("Point cloud processed. Took %.6f ms.",
                      clock.takeRealTime())
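hv_in_range is called above but not shown. A minimal horizontal field-of-view filter consistent with that call (a sketch, assuming fov is given in degrees) could look like:

    def hv_in_range(self, x, y, z, fov):
        # Keep points whose azimuth atan2(y, x), in degrees, lies inside
        # [fov[0], fov[1]].
        azimuth = np.degrees(np.arctan2(y, x))
        return np.logical_and(azimuth > fov[0], azimuth < fov[1])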
Example #9
def NewPoseUsingOpenCV(msg):

    #Wait for position of object from OpenCV node
    while not third_flag:
        rospy.sleep(0.01)  # yield the CPU instead of busy-waiting

    #Store position of object from OpenCV into local variables
    position_OpenCV = msg

    rospy.loginfo("OpenCV Point Position: [ %f, %f, %f ]" %
                  (position_OpenCV.x, position_OpenCV.y, position_OpenCV.z))

    color_pos_x = position_OpenCV.x
    color_pos_y = position_OpenCV.y

    rangefinder_dist = baxter_interface.analog_io.AnalogIO(
        'left_hand_range').state()

    rospy.loginfo("Rangefinder Distance: [ %f]" % (rangefinder_dist))

    pointx = pose_ee[0, 0]
    pointy = pose_ee[1, 0]
    pointz = pose_ee[2, 0]
    quatx = 0
    quaty = 1
    quatz = 0
    quatw = 0

    #Rangefinder is directly over the object: close the gripper
    if rangefinder_dist < 100:

        #Close Baxter's left gripper
        baxterleft = baxter_interface.Gripper('left')
        baxterleft.close()

    #Rangefinder is close above the object
    if rangefinder_dist < 150:

        incremental_distance = 0.0025

        #X-position of point not within range of center of frame
        if fabs(color_pos_x - 37) > 20:
            if color_pos_x < 37:
                pointy = pose_ee[1, 0] + incremental_distance
            else:
                pointy = pose_ee[1, 0] - incremental_distance

        #Y-position of point not within range of center of frame
        if fabs(color_pos_y - 94) > 20:
            if color_pos_y < 94:
                pointx = pose_ee[0, 0] - incremental_distance
            else:
                pointx = pose_ee[0, 0] + incremental_distance

        #X-position and Y-position of point are within range of center of frame
        if fabs(color_pos_x - 37) <= 20 and fabs(color_pos_y - 94) <= 20:
            pointx = pose_ee[0, 0]
            pointy = pose_ee[1, 0]
            pointz = pose_ee[2, 0] - 0.05

    #Rangefinder distance is nearly over object
    elif rangefinder_dist < 200:

        incremental_distance = 0.005

        #X-position of point not within range of center of frame
        if fabs(color_pos_x - 37) > 20:
            if color_pos_x < 37:
                pointy = pose_ee[1, 0] + incremental_distance
            else:
                pointy = pose_ee[1, 0] - incremental_distance

        #Y-position of point not within range of center of frame
        if fabs(color_pos_y - 94) > 20:
            if color_pos_y < 94:
                pointx = pose_ee[0, 0] - incremental_distance
            else:
                pointx = pose_ee[0, 0] + incremental_distance

        #X-position and Y-position of point are within range of center of frame
        if fabs(color_pos_x - 37) <= 20 and fabs(color_pos_y - 94) <= 20:
            pointx = pose_ee[0, 0]
            pointy = pose_ee[1, 0]
            pointz = pose_ee[2, 0] - 0.05

    #Rangefinder distance is barely over object
    elif rangefinder_dist < 250:

        incremental_distance = 0.01

        #X-position of point not within range of center of frame
        if fabs(color_pos_x - 31) > 20:
            if color_pos_x < 31:
                pointy = pose_ee[1, 0] + incremental_distance
            else:
                pointy = pose_ee[1, 0] - incremental_distance

        #Y-position of point not within range of center of frame
        if fabs(color_pos_y - 52) > 20:
            if color_pos_y < 52:
                pointx = pose_ee[0, 0] - incremental_distance
            else:
                pointx = pose_ee[0, 0] + incremental_distance

        #X-position and Y-position of point are within range of center of frame
        if fabs(color_pos_x - 31) <= 20 and fabs(color_pos_y - 52) <= 20:
            pointx = pose_ee[0, 0]
            pointy = pose_ee[1, 0]
            pointz = pose_ee[2, 0] - 0.05

    #Rangefinder is well above the object
    elif rangefinder_dist < 350:

        incremental_distance = 0.015

        #X-position of point not within range of center of frame
        if fabs(color_pos_x - 16) > 20:
            if color_pos_x < 16:
                pointy = pose_ee[1, 0] + incremental_distance
            else:
                pointy = pose_ee[1, 0] - incremental_distance

        #Y-position of point not within range of center of frame
        if fabs(color_pos_y - 42) > 20:
            if color_pos_y < 42:
                pointx = pose_ee[0, 0] - incremental_distance
            else:
                pointx = pose_ee[0, 0] + incremental_distance

        #X-position and Y-position of point are within range of center of frame
        if fabs(color_pos_x - 16) <= 20 and fabs(color_pos_y - 42) <= 20:
            pointx = pose_ee[0, 0]
            pointy = pose_ee[1, 0]
            pointz = pose_ee[2, 0] - 0.1

        print pointx, pointy

    #Rangefinder distance is too far above object to get an actual value
    else:

        incremental_distance = 0.02

        #X-position of point not within range of center of frame
        if fabs(color_pos_x - 0) > 50:
            if color_pos_x < 0:
                pointy = pose_ee[1, 0] + incremental_distance
            else:
                pointy = pose_ee[1, 0] - incremental_distance

        #Y-position of point not within range of center of frame
        if fabs(color_pos_y - 0) > 50:
            if color_pos_y < 0:
                pointx = pose_ee[0, 0] - incremental_distance
            else:
                pointx = pose_ee[0, 0] + incremental_distance

        pointz = pose_ee[2, 0] - 0.1

        #X-position and Y-position of point are within range of center of frame
        if fabs(color_pos_x - 0) <= 50 and fabs(color_pos_y - 0) <= 50:
            pointx = pose_ee[0, 0]
            pointy = pose_ee[1, 0]
            pointz = pose_ee[2, 0] - 0.15

    #Combine position and orientation in a PoseStamped() message
    move_to_pose = PoseStamped()
    move_to_pose.header = Header(stamp=rospy.Time.now(), frame_id='base')
    move_to_pose.pose.position = Point(
        x=pointx,
        y=pointy,
        z=pointz,
    )
    move_to_pose.pose.orientation = Quaternion(
        x=quatx,
        y=quaty,
        z=quatz,
        w=quatw,
    )

    print "Desired position to move to", move_to_pose.pose.position
    print "Desired orientation to move to", move_to_pose.pose.orientation

    #Returns PoseStamped() message
    return move_to_pose
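The rangefinder branches above apply one centering rule with different frame centers and step sizes. A data-driven sketch (reusing the same numbers; 'CENTERING_TABLE' and 'centering_step' are hypothetical names) condenses the pattern:

# (max_dist, center_x, center_y, step) per branch, from the code above.
CENTERING_TABLE = [
    (150, 37, 94, 0.0025),
    (200, 37, 94, 0.005),
    (250, 31, 52, 0.01),
    (350, 16, 42, 0.015),
]

def centering_step(rangefinder_dist):
    # Return (center_x, center_y, incremental_distance) for the current
    # reading; None falls through to the far-away default branch.
    for max_dist, cx, cy, step in CENTERING_TABLE:
        if rangefinder_dist < max_dist:
            return cx, cy, step
    return None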
def as_MoveToGoal(self, linear=[0, 0, 0], angular=[0, 0, 0], **kwargs):
    return MoveToGoal(header=Header(frame_id=self.frame_id),
                      posetwist=self.as_PoseTwist(linear, angular),
                      **kwargs)

def header(self):
    return Header(frame_id=self.frame_id)

def as_PoseTwistStamped(self, linear=[0, 0, 0], angular=[0, 0, 0]):
    return PoseTwistStamped(
        header=Header(frame_id=self.frame_id),
        posetwist=self.as_PoseTwist(linear, angular),
    )
Example #13
    def callback(self,detectionarray):
        #rospy.loginfo(rospy.get_name() + " received detection for time stamp "+str(det.header.stamp.secs))

        #1. Calculate stable area(s) in map frame
        #2. Decrement all garbage collector counts for all bricks in visible area(s) If counter reaches zero, remove brick
        #3. For all detected bricks, compute cloud area
        #4. Find overlaps of detected cloud areas with existing detections - ideally find overlaps in a way that total distance is minimized across all matches
        #5. Update the overlapped bricks with new detection data, and reset the counter
        #6. Insert new bricks which had no overlap, with full counter
        #rospy.loginfo(rospy.get_name() + " waited for transform ")

        # Build the new bricks
        new_bricks=[]
        for i in range(0,len(detectionarray.detections)):
            singledetection = detectionarray.detections[i]
            p_cam=singledetection.position
            self.listener.waitForTransform("map", detectionarray.header.frame_id, singledetection.stamp, rospy.Duration.from_sec(10.0))
            p_map = self.listener.transformPoint("map", PointStamped(header=Header(frame_id=detectionarray.header.frame_id,stamp=singledetection.stamp),point=p_cam))
            radius=self.calc_radius(p_cam)
            new_bricks.append(Brick(position_cam=p_cam,position_map=p_map.point, color=self.convert_brick_color(singledetection.color), radius=radius, count=RESET_COUNT, source=singledetection.source, id=-1))

        # Remove double detections in near and far camera
        i=len(new_bricks) - 1
        while i>=0:
            if new_bricks[i].source=="FAR":
                #FAR detection found: if there is any NEAR detection within tolerance from this detection, remove FAR
                #This is to avoid placing duplicates in case the same brick appears in near and far detection
                for j in range(0,len(new_bricks)):
                    if new_bricks[j].source=="NEAR" and self.calc_distance(new_bricks[i].position_map,new_bricks[j].position_map)<0.05:
                        del(new_bricks[i])
                        break
            i-=1

        #
        # Obtain lock to manipulate list of bricks
        stable_poly_map=self.transform_poly(camera_coord.STABLE_POLY, detectionarray.header.stamp, "camera_link", "map") #Theoretically, do this per brick with the brick's timestamp
        with self.lock:
            # Purges pool of visible bricks by decrementing the counters (only in stable detection area)
            i = len(self.pool) - 1
            while i >= 0:
                brick = self.pool[i]
                if self.ray_tracing(brick.position_map, stable_poly_map):
                    # Brick is in stable detection area - decrement the counter and remove if reaches zero
                    brick.dec_count()
                    if brick.count <= 0:
                        del self.pool[i]
                i = i - 1
            #
            # For each existing brick on the entire map, try to find a newly detected brick which is in the radius.
            # If a new brick is found in the radius, it is assumed to be the same as the existing brick:
            # remove it from the list of newly detected bricks, and reset the counter in the existing brick.
            # Whatever remains in the list of newly detected bricks at the end is genuinely new.
            #rospy.loginfo(rospy.get_name() + " detected {} new bricks, next id is {}".format(len(new_bricks),self.brick_id))
            for b in self.pool:
                i = len(new_bricks) - 1
                while i >= 0:
                    n = new_bricks[i]
                    distance = self.calc_distance(n.position_map,b.position_map)
                    #rospy.loginfo(rospy.get_name() + " Distance between new and {} is {}, sum of radius is {}".format(b.id,distance,n.radius+b.radius))
                    if distance < n.radius+b.radius:
                        #rospy.loginfo(rospy.get_name() + " new and {} overlap, removing new".format(b.id))
                        #Found an overlap of new with existing:
                        #1. Reset counter in existing, and update its position etc
                        #2. remove the new one (as it was actually not a new one)
                        b.count=RESET_COUNT
                        b.position_cam = n.position_cam #Always update camera position (as robot moves...)
                        if n.radius < b.radius:
                            #Update the position and the radius if the new detection is better (i.e. taken from closer)
                            b.radius=n.radius
                            b.position_map=n.position_map
                        del new_bricks[i]
                        break
                    i = i - 1
            #
            # Now assign an id to each new brick, as they are authentically new
            for n in new_bricks:
                n.id = self.create_new_brick_id()
                rospy.loginfo(rospy.get_name() + " Added new brick with id {}".format(n.id) )
            #
            # Put all new bricks into the pool
            self.pool.extend(new_bricks)
            #
            # Now send out an updated marker message with all current bricks
            header = Header(frame_id="map", stamp=detectionarray.header.stamp)
            markers=MarkerArray()
            markers.markers.append(Marker(action=Marker.DELETEALL,ns="brick"))
            markers.markers.append(Marker(action=Marker.DELETEALL,ns="cylinder"))
            markers.markers.append(Marker(action=Marker.DELETEALL,ns="text"))
            for i in range(0,len(self.pool)):
                brick=self.pool[i]
                # Create the cube
                pose=Pose(position=Point(x=brick.position_map.x, y=brick.position_map.y, z=brick.position_map.z + marker_scale.z / 2), orientation=forward)
                m=Marker(header=header,ns="brick",id=brick.id,type=Marker.CUBE,action=Marker.ADD,pose=pose,scale=marker_scale,color=brick.color)
                markers.markers.append(m)
                # Create the cylinder
                cyl_scale=Vector3(x=brick.radius*2,y=brick.radius*2,z=marker_scale.z/2)
                cyl_m=Marker(header=header,ns="cylinder",id=brick.id,type=Marker.CYLINDER,action=Marker.ADD,pose=pose,scale=cyl_scale,color=cyl_color)
                markers.markers.append(cyl_m)

                # Create the label
                text_point=Point(x=brick.position_map.x, y=brick.position_map.y, z=brick.position_map.z + 0.1)
                text_pose=Pose(position=text_point,orientation=forward)
                text_m=Marker(header=header,ns="text",id=brick.id,type=Marker.TEXT_VIEW_FACING,action=Marker.ADD,pose=text_pose,scale=text_scale,color=text_color,text=str(brick.id))
                markers.markers.append(text_m)
        #rospy.loginfo(rospy.get_name() + " publishing markers ")
        self.markers_pub.publish(markers)
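calc_distance is used throughout this example but not shown. A plausible planar version for geometry_msgs Points (a sketch, not the original implementation) is:

    def calc_distance(self, a, b):
        # Euclidean distance in the XY plane between two geometry_msgs/Point.
        return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)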
Example #14
    def callback_tim(self,timer_event):
        ps_stable=PolygonStamped(header=Header(frame_id="camera_link"), polygon=self.convert_poly_to_msg(camera_coord.STABLE_POLY))
        self.stable_area_pub.publish(ps_stable)

        ps_gripper=PolygonStamped(header=Header(frame_id="gripper"), polygon=self.convert_poly_to_msg(GRIPPER_POLY))
        self.gripper_area_pub.publish(ps_gripper)

        self.idle_waypoints_pub.publish(PoseArray(header=Header(frame_id="map"),poses=IDLE_WAYPOINTS))

        current_time = rospy.Time.now()
        if current_time < self.wait_until:
            #Timer still ticking
            return

        # Now run the state machine
        goal_status = self.move_base_action.get_state()
        if goal_status == GoalStatus.ABORTED:
            # Goal was aborted - infeasible to get there
            # Clean out map (maybe some bad detections which do not really exist), and go back to idle
            rospy.loginfo(rospy.get_name() + " move_base has aborted - cleaning out map and going back to idle state")
            with self.lock:
                self.pool = []
                self.brick_goal = None
                self.status = STATUS_IDLE
        if current_time > self.log_next_goal_status:
            rospy.loginfo(rospy.get_name() + " move base goal status is {} ({}), state machine is in status {} ".format(goal_status,STATUS_STR[goal_status],self.status))
            self.log_next_goal_status=current_time+GOAL_STATUS_LOG_INTERVAL

        # Get the current robot position in the map frame
        # TODO: instead of waiting, it would be nicer to use a tolerance
        self.listener.waitForTransform("map","gripper", current_time, rospy.Duration.from_sec(0.2))
        self.listener.waitForTransform("base_link","gripper", current_time, rospy.Duration.from_sec(0.2))
        self.listener.waitForTransform("map","base_link", current_time, rospy.Duration.from_sec(0.2))
        #pose_map = self.listener.transformPose("map", PoseStamped(header=Header(frame_id = "base_link",stamp=current_time),pose=Pose(position=Point(x=0,y=0,z=0),orientation=Quaternion(0,0,0,1)))).pose;
        pose_gripper_map = self.listener.transformPose("map", PoseStamped(header=Header(frame_id = "gripper",stamp=current_time),pose=Pose(position=Point(x=0,y=0,z=0),orientation=Quaternion(0,0,0,1)))).pose;
        pose_gripper_base = self.listener.transformPose("base_link", PoseStamped(header=Header(frame_id = "gripper",stamp=current_time),pose=Pose(position=Point(x=0,y=0,z=0),orientation=Quaternion(0,0,0,1)))).pose;
        pose_base_map = self.listener.transformPose("map", PoseStamped(header=Header(frame_id = "base_link",stamp=current_time),pose=Pose(position=Point(x=0,y=0,z=0),orientation=Quaternion(0,0,0,1)))).pose;
        # Get the gripper window
        gripper_poly_map = self.transform_poly(GRIPPER_POLY, current_time, "gripper", "map")
        if self.status in [STATUS_IDLE,STATUS_APPROACHING,STATUS_MOVINGBASE]:
            # Check if the brick still exists and is still reachable - otherwise cancel goal (if there is any) and go back to idle status
            with self.lock:
                if self.brick_goal is not None and (self.brick_goal not in self.pool):
                    rospy.loginfo(rospy.get_name() + " brick {} is no longer a valid goal - cancelling ".format(self.brick_goal.id))
                    self.move_base_action.cancel_goal() #Cancel any current goal - just in case
                    self.brick_goal = None
                    self.status = STATUS_IDLE
                    return
        # Check if we see any object in the gripper area - if yes, grab it (no matter of what the current target object is)
        if self.status in [STATUS_IDLE,STATUS_MOVINGBASE]:
            with self.lock:
                #If there are >1 in the gripper area, find the one which is closest to the gripper (or closest to the base?)
                closest_brick = None
                for b in self.pool:
                    if self.ray_tracing(b.position_map, gripper_poly_map):
                        d = self.calc_distance(b.position_map,pose_gripper_map.position);
                        if closest_brick is None or d < distance:
                            distance = d
                            closest_brick = b
                # check if we found a brick within gripper area
                if closest_brick is not None:
                    rospy.loginfo(rospy.get_name() + " Found brick {} in gripper area - approach it ".format(closest_brick.id))
                    self.brick_goal = closest_brick
                    self.target_brick_pos = closest_brick.position_map
                    self.move_base_action.cancel_goal() #Cancel any current goal - just in case
                    self.approaching_valid_pos_counter = 0
                    self.status = STATUS_APPROACHING
                    return
        #
        if self.status == STATUS_IDLE:
            with self.lock:
                #Find the closest brick (if any) - and move towards it
                closest_brick = None
                for b in self.pool:
                    d = self.calc_distance(b.position_map, pose_gripper_map.position);
                    if closest_brick is None or d < distance:
                        distance = d
                        closest_brick = b
            # check if we found a closest brick
            if closest_brick is not None:
                rospy.loginfo(rospy.get_name() + " Closest brick is {}, sending goal".format(closest_brick.id))
                #self.next_idle_waypoint = None # reset idle path
                self.brick_goal = closest_brick
                self.target_brick_pos = closest_brick.position_map
                self.target_pose = self.compute_base_target_pos(closest_brick,pose_gripper_map,pose_gripper_base)
                #rospy.loginfo(rospy.get_name() + " Going with base link to coordinates {}".format(target_pose_base_map))
                self.status = STATUS_MOVINGBASE
                #Send the action
                rospy.loginfo(rospy.get_name() + " Sending goal to move base")
                self.set_speed(MAX_SPEED)
                self.move_base_action.send_goal(MoveBaseGoal(target_pose=PoseStamped(header=Header(frame_id="map", stamp=current_time), pose=self.target_pose)))
                return
            else:
                # No bricks around - follow an idle path
                updated_waypoint = None
                if self.next_idle_waypoint is None:
                        #Find the closest waypoint from current position (this is for the very first time)
                        for waypoint in IDLE_WAYPOINTS:
                            if updated_waypoint is None or self.calc_distance(pose_base_map.position,waypoint.position) < self.calc_distance(pose_base_map.position,updated_waypoint.position):
                                updated_waypoint = waypoint
                        rospy.loginfo(rospy.get_name() + " found closest waypoint {}".format(updated_waypoint))
                elif goal_status != GoalStatus.ACTIVE:
                    updated_waypoint = self.next_idle_waypoint
                    #rospy.loginfo(rospy.get_name() + " goal status is not active; resuming path to waypoint {}".format(updated_waypoint))
                elif self.calc_distance(pose_base_map.position,self.next_idle_waypoint.position) < WAYPOINT_TOLERANCE:
                    # We are quite close to the waypoint - update goal to next waypoint before the robot will start tricks to reach goal exactly
                    i = (IDLE_WAYPOINTS.index(self.next_idle_waypoint)+1) % len(IDLE_WAYPOINTS)
                    updated_waypoint = IDLE_WAYPOINTS[i]
                    #rospy.loginfo(rospy.get_name() + " close to waypoint {}, going to next waypoint {}, {}".format(self.next_idle_waypoint,i,updated_waypoint))

                if updated_waypoint is not None:
                    #Send target to next waypoint
                    #rospy.loginfo(rospy.get_name() + " going to waypoint {}".format(updated_waypoint))
                    self.next_idle_waypoint = updated_waypoint
                    self.set_speed(MAX_SPEED)
                    self.move_base_action.send_goal(MoveBaseGoal(target_pose=PoseStamped(header=Header(frame_id="map", stamp=current_time),pose=updated_waypoint)))
                return

        if self.status==STATUS_MOVINGBASE:
            # Check if we're healthy and moving
            if goal_status == GoalStatus.PENDING or goal_status == GoalStatus.ACTIVE:
                distance = self.calc_distance(pose_gripper_map.position,self.brick_goal.position_map)
                if distance<MAX_DISTANCE_SLOWDOWN:
                    speed = (MAX_SPEED - MIN_SPEED)/(MAX_DISTANCE_SLOWDOWN-MIN_DISTANCE_SLOWDOWN) *(distance-MIN_DISTANCE_SLOWDOWN) + MIN_SPEED
                    speed = max(speed,MIN_SPEED)
                    self.set_speed(speed)
                #Still moving
                # When getting close to target (brick_goal is in far camera, and close enough): Adjust the goal, as we're getting more precision
                d = self.calc_distance(self.target_brick_pos,self.brick_goal.position_map);
                tolerance = distance*0.2 #If distance to target is 2m, tolerance is 40 cm. If distance is 50cm, tolerance is 10 cm
                if d > tolerance:
                    rospy.loginfo(rospy.get_name() + " Readjusting goal because previous target is {} m off".format(d))
                    target_pose = self.compute_base_target_pos(self.brick_goal, pose_gripper_map, pose_gripper_base)
                    self.target_pose = target_pose
                    self.target_brick_pos = self.brick_goal.position_map
                    self.move_base_action.send_goal(MoveBaseGoal(target_pose=PoseStamped(header=Header(frame_id="map", stamp=current_time), pose=self.target_pose)))
                return
            if goal_status != GoalStatus.SUCCEEDED:
                # Finished, but with error
                rospy.loginfo(rospy.get_name() + " Could not reach goal")
            # reached the goal or aborted - go back to idle mode (which will initiate gripper if it is in the right place)
            else:
                rospy.loginfo(rospy.get_name() + " Goal reached")
            self.brick_goal = None
            self.status = STATUS_IDLE
            return

        if self.status == STATUS_APPROACHING:
            # Try placing the brick directly under the gripper, then wait a bit for getting a few stable shots
            if not self.ray_tracing(self.brick_goal.position_map,gripper_poly_map):
                # The brick has left the gripper area - give up
                rospy.loginfo(rospy.get_name() + " Brick no longer in gripper area - giving up.")
                self.brick_goal = None
                self.status = STATUS_IDLE
                return
            # compute the required position adjustment, and send to motor controller
            # Note that for this adjustment, we use the camera position on the brick directly (rather than going
            # through map frame) to avoid jitters in loop via the map transform
            brick_cam = PointStamped(header=Header(stamp=current_time, frame_id="camera_link"), point=self.brick_goal.position_cam)
            brick_base = self.listener.transformPoint("base_link",brick_cam)
            if math.sqrt((brick_base.point.x - pose_gripper_base.position.x) ** 2 + (brick_base.point.y - pose_gripper_base.position.y) ** 2)  < GRIPPER_TOLERANCE:
                self.approaching_valid_pos_counter += 1
                rospy.loginfo(rospy.get_name() + " Identified {} frames at correct position ".format(self.approaching_valid_pos_counter))
                #wait for a few detections to be at the exact same base_link location - only then initiate gripper
                if self.approaching_valid_pos_counter > 5:
                    # Gripper is close enough - grip!
                    rospy.loginfo(rospy.get_name() + " Moving gripper forward")
                    self.gripper_proxy(action="MOVE", duration=GRIPPER_MOVE_TIME, position=GRIPPER_POS_DOWN)
                    self.wait_until = current_time + GRIPPER_MOVE_TIME
                    self.status = STATUS_GRIPPER_LOWERING
                else:
                    self.wait_until = current_time + Duration(0.5) #Wait a bit for next frame
            else:
                # Gripper is not close enough - adjust
                rospy.loginfo(rospy.get_name() + " Adjusting gripper position")
                self.approaching_valid_pos_counter=0
                brick_cam = PointStamped(header=Header(stamp=current_time, frame_id="camera_link"), point=self.brick_goal.position_cam)
                brick_gripper = self.listener.transformPoint("gripper",brick_cam)
                #rospy.loginfo(rospy.get_name() + " base coordinates: brick: {}, gripper: {}".format(brick_base.point,pose_gripper_base.position))
                forward_m = brick_base.point.x - pose_gripper_base.position.x  # use the x difference only; rotation is small enough to ignore
                if abs(forward_m) > 0.05:
                    # To avoid overshooting the target, scale down the forward command
                    # while we are still far away; this also compensates for the limited
                    # acceleration, which would otherwise typically cause an overshoot
                    forward_m = forward_m * 0.8
                alpha_rad = math.atan2(brick_base.point.y, brick_base.point.x) - math.atan2(pose_gripper_base.position.y,pose_gripper_base.position.x)  # in rad
                delta_m = alpha_rad / 3.657  # approximation; TODO: clean up / merge with the speed-control version
                rospy.loginfo(rospy.get_name() + " Need to go forward {} m, with a delta of {} m".format(forward_m,delta_m))
                readpos_reply=self.readpos_proxy()
                forward_enc = enc_per_m * forward_m
                delta_enc = enc_per_m * delta_m
                positions_enc = [
                    readpos_reply.encoder[0] + forward_enc - delta_enc,
                    readpos_reply.encoder[1] + forward_enc - delta_enc,
                    readpos_reply.encoder[2] + forward_enc + delta_enc,
                    readpos_reply.encoder[3] + forward_enc + delta_enc
                ]
                # Choose the duration so the move runs at MIN_SPEED / MIN_ANG_SPEED (and at least 0.5 s)
                duration = Duration(max(abs(forward_m)/MIN_SPEED,abs(alpha_rad)/MIN_ANG_SPEED,0.5))
                # Apply the duration as an offset on the received time (the microcontroller clock may be slightly off)
                writeposentry = motorctrl_writeentry(time=readpos_reply.time + duration, target=positions_enc)
                writeposentries = [writeposentry]
                self.writepos_proxy(writeposentries=writeposentries)
                # add some time to get mechanically stable once the movement has finished
                self.wait_until = current_time + duration + Duration(0.3)
            return

        if self.status == STATUS_GRIPPER_LOWERING:
            rospy.loginfo(rospy.get_name() + " Closing gripper")
            self.gripper_proxy(action="CLOSE",duration=GRIPPER_GRIP_TIME)
            self.wait_until = current_time + GRIPPER_GRIP_TIME
            self.status = STATUS_GRIPPER_CLOSING
            return

        if self.status == STATUS_GRIPPER_CLOSING:
            rospy.loginfo(rospy.get_name() + " Moving gripper backward")
            self.gripper_proxy(action="MOVE", duration=GRIPPER_MOVE_TIME, position=GRIPPER_POS_UP)
            self.wait_until = current_time + GRIPPER_MOVE_TIME
            self.status = STATUS_GRIPPER_RAISING
            return

        if self.status == STATUS_GRIPPER_RAISING:
            rospy.loginfo(rospy.get_name() + " Opening gripper")
            self.gripper_proxy(action="OPEN",duration=GRIPPER_GRIP_TIME)
            self.wait_until = current_time + GRIPPER_GRIP_TIME
            self.status = STATUS_GRIPPER_OPENING
            return

        if self.status == STATUS_GRIPPER_OPENING:
            rospy.loginfo(rospy.get_name() + " Gripper opened")
            self.brick_goal = None
            self.status = STATUS_IDLE
            return
    print "Which plan would you like to generate? "
    plan_dict = {}
    for i, name in enumerate(plan_names):
        plan_dict[name] = plans[plan_names[i]]()
    return plan_dict


if __name__ == '__main__':
    rospy.init_node("controller_runner")
    configs = generate_plan()

    if isinstance(configs, XYHVPath):
        rospy.loginfo('022 path called')
        path = configs
    else:
        h = Header()
        h.stamp = rospy.Time.now()
        desired_speed = 2.0  # 0.5
        ramp_percent = 0.1
        ramp_up = np.linspace(0.0, desired_speed,
                              int(ramp_percent * len(configs)))
        ramp_down = np.linspace(desired_speed, 0.3,
                                int(ramp_percent * len(configs)))
        speeds = np.zeros(len(configs))
        speeds[:] = desired_speed
        print len(configs)
        #print("configs:", configs)
        speeds[0:len(ramp_up)] = ramp_up
        speeds[-len(ramp_down):] = ramp_down
        path = XYHVPath(h, [
            XYHV(*[config[0], config[1], config[2], speed])
            # assumed completion: the source cuts off mid-expression; pairing
            # each config with its ramped speed matches the arrays built above
            for config, speed in zip(configs, speeds)
        ])
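
One edge case in the ramp construction above: for short paths, int(ramp_percent * len(configs)) can be zero, and because -0 == 0, the slice speeds[-len(ramp_down):] then spans the whole array, so assigning the empty ramp raises a ValueError. A guarded sketch of the same trapezoidal profile (numpy only; the function name is an assumption):

import numpy as np

def trapezoid_speeds(n, cruise=2.0, end=0.3, ramp_percent=0.1):
    # Same trapezoidal profile as above, but safe for very short paths.
    speeds = np.full(n, cruise)
    k = int(ramp_percent * n)
    if k > 0:  # guard: speeds[-0:] would otherwise address the whole array
        speeds[:k] = np.linspace(0.0, cruise, k)
        speeds[-k:] = np.linspace(cruise, end, k)
    return speeds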
Example #16
def NewPoseUsingQRcode(msg):

    while not second_flag:  # busy-wait until second_flag is set elsewhere
        pass

    print "position of end-effector=", pose_ee[0:3, :]

    #Store pose of QR code from camera into local variables
    position_visp = msg.pose.position
    quat_visp = msg.pose.orientation

    rospy.loginfo("Tag Point Position: [ %f, %f, %f ]" %
                  (position_visp.x, position_visp.y, position_visp.z))
    rospy.loginfo("Tag Quat Orientation: [ %f, %f, %f, %f]" %
                  (quat_visp.x, quat_visp.y, quat_visp.z, quat_visp.w))

    tag_pos_x = position_visp.x
    tag_pos_y = position_visp.y
    tag_pos_z = position_visp.z

    tag_quat_x = quat_visp.x
    tag_quat_y = quat_visp.y
    tag_quat_z = quat_visp.z
    tag_quat_w = quat_visp.w

    #Determine whether the tag position changed between the previous frame and the current one
    if math.fabs(tag_pos_x - previous_tag[0, 0]) < 1e-2 and math.fabs(
            tag_pos_y - previous_tag[0, 1]) < 1e-2 and math.fabs(
                tag_pos_z - previous_tag[0, 2]) < 1e-1:
        print "NO movement"
    else:
        global movement
        movement = movement + 1

    print "Movement counter:", movement
    #Account for changes in tag position and only move towards the tag once
    if movement == 0:
        tag_pos_x = 0
        tag_pos_y = 0
        tag_pos_z = 0
        tag_quat_x = 0
        tag_quat_y = 0
        tag_quat_z = 0
        tag_quat_w = 1
    elif movement == 2:
        movement = 0

        tag_pos_x = 0
        tag_pos_y = 0
        tag_pos_z = 0
        tag_quat_x = 0
        tag_quat_y = 0
        tag_quat_z = 0
        tag_quat_w = 1

        #Close Baxter's left gripper
        baxterleft = baxter_interface.Gripper('left')
        baxterleft.close()

    #New pose to move towards
    pointx = pose_ee[0, 0] - tag_pos_x
    pointy = pose_ee[1, 0] - tag_pos_y
    pointz = pose_ee[2, 0] - tag_pos_z

    quatx = pose_ee[3, 0]  # - tag_quat_x
    quaty = pose_ee[4, 0]  # - tag_quat_y
    quatz = pose_ee[5, 0]  # - tag_quat_z
    quatw = pose_ee[6, 0]  # - tag_quat_w

    #Combine position and orientation in a PoseStamped() message
    move_to_pose = PoseStamped()
    move_to_pose.header = Header(stamp=rospy.Time.now(), frame_id='base')
    move_to_pose.pose.position = Point(
        x=pointx,
        y=pointy,
        z=pointz,
    )
    move_to_pose.pose.orientation = Quaternion(
        x=quatx,
        y=quaty,
        z=quatz,
        w=quatw,
    )

    print "Desired position to move to", move_to_pose.pose.position
    print "Desired orientation to move to", move_to_pose.pose.orientation

    #Update previous_tag with the distance from Baxter's camera to QR code
    global previous_tag
    previous_tag[0, 0] = msg.pose.position.x
    previous_tag[0, 1] = msg.pose.position.y
    previous_tag[0, 2] = msg.pose.position.z

    return move_to_pose
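
The movement test above compares each axis against a fixed threshold (1 cm in x/y, 10 cm in z). Pulling it into a helper makes the debounce tunable and testable in isolation; a minimal sketch (the function name and tuple interface are assumptions):

import math

def tag_moved(prev, cur, xy_tol=1e-2, z_tol=1e-1):
    # prev and cur are (x, y, z) tuples in metres; thresholds match the
    # callback above (1 cm laterally, 10 cm in depth).
    return (math.fabs(cur[0] - prev[0]) >= xy_tol or
            math.fabs(cur[1] - prev[1]) >= xy_tol or
            math.fabs(cur[2] - prev[2]) >= z_tol)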
    def right_arm_go_to_pose_goal(self):
        # Set the move group to act on; here it is the right arm
        right_arm = self.right_arm

        # Get the current end-effector pose
        pose_goal = right_arm.get_current_pose().pose

        # Constrain the end-effector gripper joint
        right_joint_const = JointConstraint()
        right_joint_const.joint_name = "gripper_r_joint_r"
        if Rightfinger > -55:
            right_joint_const.position = 0.024
        else:
            right_joint_const.position = 0
        right_joint_const.weight = 1.0

        # Constrain joint 1 rotation
        right_joint1_const = JointConstraint()
        right_joint1_const.joint_name = "yumi_joint_1_r"
        right_joint1_const.position = 0
        right_joint1_const.tolerance_above = 120
        right_joint1_const.tolerance_below = 0
        right_joint1_const.weight = 1.0

        # Constrain joint 2 rotation
        right_joint2_const = JointConstraint()
        right_joint2_const.joint_name = "yumi_joint_2_r"
        right_joint2_const.position = 0
        right_joint2_const.tolerance_above = 0
        right_joint2_const.tolerance_below = 150
        right_joint2_const.weight = 1.0

        # Constrain joint 3 rotation
        right_joint3_const = JointConstraint()
        right_joint3_const.joint_name = "yumi_joint_3_r"
        right_joint3_const.position = 0
        right_joint3_const.tolerance_above = 35
        right_joint3_const.tolerance_below = 55
        right_joint3_const.weight = 1.0

        # Constrain joint 4 rotation
        right_joint4_const = JointConstraint()
        right_joint4_const.joint_name = "yumi_joint_4_r"
        right_joint4_const.position = 0
        right_joint4_const.tolerance_above = 60
        right_joint4_const.tolerance_below = 75
        right_joint4_const.weight = 1.0

        # Constrain joint 5 rotation
        right_joint5_const = JointConstraint()
        right_joint5_const.joint_name = "yumi_joint_5_r"
        right_joint5_const.position = 40
        right_joint5_const.tolerance_above = 50
        right_joint5_const.tolerance_below = 20
        right_joint5_const.weight = 1.0

        # Constrain joint 6 rotation
        right_joint6_const = JointConstraint()
        right_joint6_const.joint_name = "yumi_joint_6_r"
        right_joint6_const.position = 0
        right_joint6_const.tolerance_above = 10
        right_joint6_const.tolerance_below = 35
        right_joint6_const.weight = 1.0

        # Constrain joint 7 rotation
        right_joint7_const = JointConstraint()
        right_joint7_const.joint_name = "yumi_joint_7_r"
        right_joint7_const.position = -10
        right_joint7_const.tolerance_above = 0
        right_joint7_const.tolerance_below = 160
        right_joint7_const.weight = 1.0

        # Constrain the end-effector position
        right_position_const = PositionConstraint()
        right_position_const.header = Header()
        right_position_const.link_name = "gripper_r_joint_r"
        right_position_const.target_point_offset.x = 0.5
        right_position_const.target_point_offset.y = -0.5
        right_position_const.target_point_offset.z = 1.0
        right_position_const.weight = 1.0

        # Add an end-effector orientation constraint:
        right_orientation_const = OrientationConstraint()
        right_orientation_const.header = Header()
        right_orientation_const.orientation = pose_goal.orientation
        right_orientation_const.link_name = "gripper_r_finger_r"
        right_orientation_const.absolute_x_axis_tolerance = 0.50
        right_orientation_const.absolute_y_axis_tolerance = 0.25
        right_orientation_const.absolute_z_axis_tolerance = 0.50
        right_orientation_const.weight = 100

        # Apply the full set of constraints
        consts = Constraints()
        consts.joint_constraints = [right_joint_const]
        # consts.orientation_constraints = [right_orientation_const]
        # consts.position_constraints = [right_position_const]
        right_arm.set_path_constraints(consts)

        # Set the target pose for the move group
        pose_goal.orientation.x = Right_Qux
        pose_goal.orientation.y = Right_Quy
        pose_goal.orientation.z = Right_Quz
        pose_goal.orientation.w = Right_Quw
        pose_goal.position.x = (Neurondata[5] - 0.05) * 1.48 + 0.053
        pose_goal.position.y = (Neurondata[3] + 0.18) * 1.48 - 0.12
        pose_goal.position.z = (Neurondata[4] - 0.53) * 1.48 + 0.47
        right_arm.set_pose_target(pose_goal)
        print "End effector pose %s" % pose_goal

        # # Set the target pose for the move group
        # pose_goal.orientation.x = 0.1644
        # pose_goal.orientation.y = 0.3111
        # pose_goal.orientation.z = 0.9086
        # pose_goal.orientation.w = 0.2247
        # pose_goal.position.x = (Neurondata[5]-0.05)*1.48+0.053
        # pose_goal.position.y = (Neurondata[3]+0.18)*1.48-0.12
        # pose_goal.position.z = (Neurondata[4]-0.53)*1.48+0.47
        # right_arm.set_pose_target(pose_goal)
        # print "End effector pose %s" % pose_goal

        # # Set the target pose for the move group
        # pose_goal.orientation.x = pose_goal.orientation.x
        # pose_goal.orientation.y = pose_goal.orientation.y
        # pose_goal.orientation.z = pose_goal.orientation.z
        # pose_goal.orientation.w = pose_goal.orientation.w
        # pose_goal.position.x = pose_goal.position.x
        # pose_goal.position.y = pose_goal.position.y - 0.01
        # pose_goal.position.z = pose_goal.position.z
        # right_arm.set_pose_target(pose_goal)
        # print "End effector pose %s" % pose_goal

        # Plan and execute the motion
        traj = right_arm.plan()
        right_arm.execute(traj, wait=False)
        # Clear the pose targets once the motion completes
        right_arm.clear_pose_targets()
        # Clear the path constraints
        right_arm.clear_path_constraints()
        # Make sure no residual motion keeps executing
        right_arm.stop()
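
One caveat on the joint constraints above: MoveIt expresses JointConstraint position and tolerances in the joint's native units, which is radians for revolute joints, while values such as 120 and 150 read like degrees. If they are meant as degrees, a small builder like this sketch would convert them (the helper name is an assumption):

import math
from moveit_msgs.msg import JointConstraint

def joint_constraint_deg(name, position_deg, above_deg, below_deg, weight=1.0):
    # Build a JointConstraint from degree inputs, converting to the radians
    # MoveIt expects for revolute joints.
    jc = JointConstraint()
    jc.joint_name = name
    jc.position = math.radians(position_deg)
    jc.tolerance_above = math.radians(above_deg)
    jc.tolerance_below = math.radians(below_deg)
    jc.weight = weight
    return jc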
Example #18
def callback(depth_data, rgb_data):
    print('rostime:', rospy.get_time())
    depth = ros_numpy.numpify(depth_data)
    depth_matlab = matlab.double(np.squeeze(depth))
    rgb = ros_numpy.numpify(rgb_data)
    rgb = np.moveaxis(rgb, -1, 0)
    #np.save('depth.npy', depth)
    #np.save('rgb.npy', rgb)

    print('RGB_SHAPE:', rgb.shape, 'DEPTH_SHAPE:', depth.shape)
    assert depth_data.header.stamp == rgb_data.header.stamp

    try:
        (trans, rot) = listener.lookupTransform('/world', '/camera',
                                                depth_data.header.stamp)
        trans_mat = tf.transformations.translation_matrix(trans)
        rot_mat = tf.transformations.quaternion_matrix(rot)
        cam_pose = np.dot(trans_mat, rot_mat)
        np.save('cam_pose.npy', cam_pose)
        vox_origin = matlab_eng.MYperpareDataTest(depth_matlab)
        position = dataset._depth2position(depth, cam_pose, vox_origin,
                                           dataset.param)

    except Exception as e:
        rospy.logerr('FAIL: %s', e)
        return

    var_depth = Variable(
        torch.unsqueeze(torch.unsqueeze(torch.tensor(depth), 0),
                        0).float()).cuda()
    var_rgb = Variable(torch.unsqueeze(torch.tensor(rgb), 0).float()).cuda()
    position = torch.tensor(position).long().cuda()
    print(var_depth.shape, var_rgb.shape, position.shape)

    with torch.no_grad():
        pred = net(x_depth=var_depth, x_rgb=var_rgb, p=position)
    torch.cuda.empty_cache()
    y_pred = pred.cpu().data.numpy()
    print(y_pred.shape)

    predict_completion = np.squeeze(np.argmax(y_pred, axis=1))
    ids_occ = np.argwhere(predict_completion != 0)
    labels_seg_occ = [
        predict_completion[id_[0], id_[1], id_[2]] for id_ in ids_occ
    ]
    rospy.loginfo("number of predicted occupied voxels:%d", ids_occ.shape[0])
    poses_occ = ids_occ * VOX_UNIT_OUT

    # Declaring pointcloud
    occ_pointcloud = PointCloud()

    # Filling pointcloud header
    header = Header()
    header.stamp = rospy.Time.now()
    header.frame_id = '/camera'
    occ_pointcloud.header = header

    channel_r = ChannelFloat32()
    channel_r.name = "r"
    channel_g = ChannelFloat32()
    channel_g.name = "g"
    channel_b = ChannelFloat32()
    channel_b.name = "b"

    # Filling points
    #print('FILLING POINTS')
    for pose, label in zip(poses_occ, labels_seg_occ):
        occ_pointcloud.points.append(Point32(pose[0], pose[1], pose[2]))
        channel_r.values.append(colorMap[label][0])
        channel_g.values.append(colorMap[label][1])
        channel_b.values.append(colorMap[label][2])
    occ_pointcloud.channels.append(channel_r)
    occ_pointcloud.channels.append(channel_g)
    occ_pointcloud.channels.append(channel_b)
    # Publish pointcloud
    pointcloud_publisher.publish(occ_pointcloud)
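
The callback above publishes the legacy sensor_msgs/PointCloud with separate r/g/b channels; sensor_msgs/PointCloud2 is generally preferred today. A minimal sketch of packing the same xyz plus per-point color via sensor_msgs.point_cloud2 (the function name is an assumption; points is an iterable of (x, y, z, r, g, b) tuples):

import rospy
from std_msgs.msg import Header
from sensor_msgs.msg import PointField
import sensor_msgs.point_cloud2 as pc2

def make_colored_cloud2(points, frame_id='/camera'):
    # Declare one FLOAT32 field per component, 4 bytes apart.
    fields = [PointField('x', 0, PointField.FLOAT32, 1),
              PointField('y', 4, PointField.FLOAT32, 1),
              PointField('z', 8, PointField.FLOAT32, 1),
              PointField('r', 12, PointField.FLOAT32, 1),
              PointField('g', 16, PointField.FLOAT32, 1),
              PointField('b', 20, PointField.FLOAT32, 1)]
    header = Header(stamp=rospy.Time.now(), frame_id=frame_id)
    return pc2.create_cloud(header, fields, points)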
    def both_arms_go_to_pose_goal(self):
        # Set the move group to act on; here it is both_arms
        both_arms = self.both_arms
        # Get the current joint angles
        axis_angle = both_arms.get_current_joint_values()
        # print axis_angle
        # Get the current end-effector poses
        right_pose_goal = both_arms.get_current_pose(
            end_effector_link="gripper_r_finger_r")
        left_pose_goal = both_arms.get_current_pose(
            end_effector_link="gripper_l_finger_r")
        print right_pose_goal
        # Constrain the gripper joints
        right_joint_const = JointConstraint()
        right_joint_const.joint_name = "gripper_r_joint_r"
        if Rightfinger > -55:
            right_joint_const.position = 0.024
        else:
            right_joint_const.position = 0
        left_joint_const = JointConstraint()
        left_joint_const.joint_name = "gripper_l_joint_r"
        if Leftfinger > -32:
            left_joint_const.position = 0.024
        else:
            left_joint_const.position = 0

        # Add end-effector orientation constraints:
        right_orientation_const = OrientationConstraint()
        right_orientation_const.header = Header()
        right_orientation_const.orientation = right_pose_goal.pose.orientation
        right_orientation_const.link_name = "gripper_r_joint_r"
        right_orientation_const.absolute_x_axis_tolerance = 0.6
        right_orientation_const.absolute_y_axis_tolerance = 0.6
        right_orientation_const.absolute_z_axis_tolerance = 0.6
        right_orientation_const.weight = 1

        left_orientation_const = OrientationConstraint()
        left_orientation_const.header = Header()
        left_orientation_const.orientation = left_pose_goal.pose.orientation
        left_orientation_const.link_name = "gripper_l_joint_r"
        left_orientation_const.absolute_x_axis_tolerance = 0.6
        left_orientation_const.absolute_y_axis_tolerance = 0.6
        left_orientation_const.absolute_z_axis_tolerance = 0.6
        left_orientation_const.weight = 1

        # Apply the full set of constraints
        consts = Constraints()
        consts.joint_constraints = [right_joint_const, left_joint_const]
        # consts.orientation_constraints = [right_orientation_const, left_orientation_const]
        both_arms.set_path_constraints(consts)

        # # Set the target poses for the move group
        # # Right arm
        # right_pose_goal.pose.orientation.x = Right_Qux
        # right_pose_goal.pose.orientation.y = Right_Quy
        # right_pose_goal.pose.orientation.z = Right_Quz
        # right_pose_goal.pose.orientation.w = Right_Quw
        # right_pose_goal.pose.position.x = Neurondata[5]
        # right_pose_goal.pose.position.y = Neurondata[3]
        # right_pose_goal.pose.position.z = Neurondata[4]
        # # Left arm
        # left_pose_goal.pose.orientation.x = Left_Qux
        # left_pose_goal.pose.orientation.y = Left_Quy
        # left_pose_goal.pose.orientation.z = Left_Quz
        # left_pose_goal.pose.orientation.w = Left_Quw
        # left_pose_goal.pose.position.x = Neurondata[11]
        # left_pose_goal.pose.position.y = Neurondata[9]
        # left_pose_goal.pose.position.z = Neurondata[10]

        # # Right arm
        # right_pose_goal.pose.orientation.x = Right_Qux
        # right_pose_goal.pose.orientation.y = Right_Quy
        # right_pose_goal.pose.orientation.z = Right_Quz
        # right_pose_goal.pose.orientation.w = Right_Quw
        # right_pose_goal.pose.position.x = (1266/740)*(Neurondata[5]+0.28)-0.495
        # right_pose_goal.pose.position.y = (1295/780)*(Neurondata[3]+0.56)-0.754
        # right_pose_goal.pose.position.z = (1355/776)*(Neurondata[4]-0.054)-0.184
        # # Left arm
        # left_pose_goal.pose.orientation.x = Left_Qux
        # left_pose_goal.pose.orientation.y = Left_Quy
        # left_pose_goal.pose.orientation.z = Left_Quz
        # left_pose_goal.pose.orientation.w = Left_Quw
        # left_pose_goal.pose.position.x = (1266/850)*(Neurondata[11]+0.33)-0.495
        # left_pose_goal.pose.position.y = (1295/720)*(Neurondata[9]+0.19)-0.541
        # left_pose_goal.pose.position.z = (1355/745)*(Neurondata[10]-0.055)-0.184

        # Right arm
        right_pose_goal.pose.orientation.x = Right_Qux
        right_pose_goal.pose.orientation.y = Right_Quy
        right_pose_goal.pose.orientation.z = Right_Quz
        right_pose_goal.pose.orientation.w = Right_Quw
        right_pose_goal.pose.position.x = (Neurondata[5] - 0.05) * 1.48 + 0.053
        right_pose_goal.pose.position.y = (Neurondata[3] + 0.18) * 1.48 - 0.12
        right_pose_goal.pose.position.z = (Neurondata[4] - 0.53) * 1.48 + 0.47
        # Left arm
        left_pose_goal.pose.orientation.x = Left_Qux
        left_pose_goal.pose.orientation.y = Left_Quy
        left_pose_goal.pose.orientation.z = Left_Quz
        left_pose_goal.pose.orientation.w = Left_Quw
        left_pose_goal.pose.position.x = (Neurondata[11] - 0.05) * 1.48 + 0.053
        left_pose_goal.pose.position.y = (Neurondata[9] - 0.18) * 1.48 + 0.12
        left_pose_goal.pose.position.z = (Neurondata[10] - 0.53) * 1.48 + 0.47

        # # Right arm
        # right_pose_goal.pose.orientation.x = right_pose_goal.pose.orientation.x
        # right_pose_goal.pose.orientation.y = right_pose_goal.pose.orientation.y
        # right_pose_goal.pose.orientation.z = right_pose_goal.pose.orientation.z
        # right_pose_goal.pose.orientation.w = right_pose_goal.pose.orientation.w
        # right_pose_goal.pose.position.x = right_pose_goal.pose.position.x
        # right_pose_goal.pose.position.y = right_pose_goal.pose.position.y
        # right_pose_goal.pose.position.z = right_pose_goal.pose.position.z
        # # Left arm
        # left_pose_goal.pose.orientation.x = left_pose_goal.pose.orientation.x
        # left_pose_goal.pose.orientation.y = left_pose_goal.pose.orientation.y
        # left_pose_goal.pose.orientation.z = left_pose_goal.pose.orientation.z
        # left_pose_goal.pose.orientation.w = left_pose_goal.pose.orientation.w
        # left_pose_goal.pose.position.x = left_pose_goal.pose.position.x
        # left_pose_goal.pose.position.y = left_pose_goal.pose.position.y
        # left_pose_goal.pose.position.z = left_pose_goal.pose.position.z

        # Set the two target poses for the group
        both_arms.set_pose_target(right_pose_goal,
                                  end_effector_link="gripper_r_finger_r")
        both_arms.set_pose_target(left_pose_goal,
                                  end_effector_link="gripper_l_finger_r")
        # Plan and execute the motion
        traj = both_arms.plan()
        both_arms.execute(traj, wait=False)
        # # Clear the path constraints
        both_arms.clear_path_constraints()
        # Make sure the motion output has stopped
        both_arms.stop()
Example #20
def handlemarkers(msg):
	global empty_counter
	global x
	global y
	global z
	global id
	global last_action
	global lastcoords
	global lastarg
	global issafe

	# Action-history handling ---------------------------------------------------
	if msg.markers == []:
		# print "No marker counter:" + str(empty_counter)
		print '\n'
		empty_counter += 1
		stop(None)
		if empty_counter <= 2:
			print("Temporary lost markers! Re-doing last action!")
			try:
				last_action(lastarg)
			except NameError:
				pass
		else:
			print("No markers in sight! Engaging scout mode")
			stopnsearch(searchturn)
	#---------------------------------------------------------------------------
	# Getting attributes from markers
	for marker in msg.markers:
		empty_counter = 0
		## Transformation to return the marker coordinates correctly
		id = marker.id
		marcador = "ar_marker_" + str(id)
		header = Header(frame_id=marcador)

		try:
			trans = tf_buffer.lookup_transform(frame, marcador, rospy.Time(0))
		except BaseException:
			continue  # no transform available for this marker yet - skip it

		x = round(trans.transform.translation.x,2)
		y = round(trans.transform.translation.y,2)
		z = round(trans.transform.translation.z,2)
		t = transformations.translation_matrix([x, y, z])
		# Find the rotations and build a rotation matrix from the quaternions
		r = transformations.quaternion_matrix([trans.transform.rotation.x, trans.transform.rotation.y, trans.transform.rotation.z, trans.transform.rotation.w])
		m = numpy.dot(r,t) # Compose the translations and rotations into a single matrix
		z_marker = [0,0,1,0] # Four coordinates because it is a vector in homogeneous coordinates
		v2 = numpy.dot(m, z_marker)
		v2_n = v2[0:-1] # Drop the last (homogeneous) component
		n2 = v2_n/linalg.norm(v2_n) # Normalize the vector
		x_robo = [1,0,0]
		cosa = numpy.dot(n2, x_robo) # Projection of the marker normal onto the robot's x axis
		angulo_marcador_robo = round(math.degrees(math.acos(cosa)),2)

		## OLD SYSTEM -----------------------------------------------------
		# x = round(marker.pose.pose.position.x * 1e308 *100,2)
		# y = round(marker.pose.pose.position.y * 1e308 *100,2)
		# z = round(marker.pose.pose.position.z * 1e308 *1000000000,2)
		# -----------------------------------------------------------
		print(marker.id, "x:", x, " y:", y, " z:", z, "angulo:",angulo_marcador_robo)  #maker_pose
		lastcoords[0] = x
		lastcoords[1] = y
		lastcoords[2] = z
		counter = 0
		print '\n'

		# Dealing with marker inputs -------------------------------------------
		if empty_counter < historysize and issafe: # Check robot safety and the action history
			if marker.id == 8:
				lastarg = lastcoords[2]
				retreat(lastcoords[2])
				last_action = retreat

			elif marker.id == 5:
				lastarg = lastcoords[0]
				move_forward(lastarg)
				last_action = move_forward

			elif marker.id == 100:
				spin(None)
				last_action = spin
				lastarg = None

			else:
				stop(None)
				last_action = stop
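
The geometry inside the marker loop (rotate the marker's z axis into the robot frame, project it onto the robot's x axis, take the arc cosine) is worth isolating; a standalone sketch assuming the same 4x4 homogeneous transform m as above:

import math
import numpy
from numpy import linalg

def marker_angle_deg(m):
    # m: 4x4 homogeneous transform combining the marker rotation/translation.
    z_marker = [0, 0, 1, 0]           # w=0: a direction, so only rotation applies
    v = numpy.dot(m, z_marker)[0:-1]  # drop the homogeneous component
    n = v / linalg.norm(v)            # normalize
    cosa = numpy.dot(n, [1, 0, 0])    # projection onto the robot's x axis
    cosa = max(-1.0, min(1.0, cosa))  # clamp against floating-point drift
    return round(math.degrees(math.acos(cosa)), 2)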
Example #21
    def create_point_cloud_message(self, pts):
        header = Header()
        header.stamp = rospy.Time.now()
        header.frame_id = '/world'
        cloud_message = pcl2.create_cloud_xyz32(header, pts)
        return cloud_message
def ik_service_client(limb, desired_pose, use_advanced_options=False):
    ns = "ExternalTools/" + limb + "/PositionKinematicsNode/IKService"
    iksvc = rospy.ServiceProxy(ns, SolvePositionIK)  #connection to the service
    ikreq = SolvePositionIKRequest()  #send a request from client to service
    hdr = Header(stamp=rospy.Time.now(), frame_id='base')
    poses = {
        'right':
        PoseStamped(
            header=hdr,
            pose=Pose(
                position=Point(
                    x=desired_pose.position.x,
                    y=desired_pose.position.y,
                    z=desired_pose.position.z,
                ),
                orientation=Quaternion(
                    x=desired_pose.orientation.x,
                    y=desired_pose.orientation.y,
                    z=desired_pose.orientation.z,
                    w=desired_pose.orientation.w,
                ),
            ),
        ),
    }
    # Add the desired pose for inverse kinematics:
    # send the position and orientation to the service
    ikreq.pose_stamp.append(poses[limb])
    # Request inverse kinematics from base to "right_hand" link
    ikreq.tip_names.append('right_hand')

    if (use_advanced_options):
        # Optional Advanced IK parameters
        rospy.loginfo("Running Advanced IK Service Client example.")
        # The joint seed is where the IK position solver starts its optimization
        ikreq.seed_mode = ikreq.SEED_USER
        seed = JointState()
        seed.name = [
            'right_j0', 'right_j1', 'right_j2', 'right_j3', 'right_j4',
            'right_j5', 'right_j6'
        ]
        seed.position = [0.1, 0.1, -0.1, -0.1, 0.2, -2, 2]
        ikreq.seed_angles.append(seed)

        # Once the primary IK task is solved, the solver will then try to bias
        # the joint angles toward the goal joint configuration. The null space is
        # the extra degrees of freedom the joints can move without affecting the
        # primary IK task.
        ikreq.use_nullspace_goal.append(True)
        # The nullspace goal can either be the full set or subset of joint angles
        goal = JointState()
        goal.name = ['right_j1', 'right_j2', 'right_j3']
        goal.position = [0.1, 0.1, 0.3]
        ikreq.nullspace_goal.append(goal)
        # The gain used to bias toward the nullspace goal; must be in [0.0, 1.0].
        # If empty, the default gain of 0.4 will be used
        ikreq.nullspace_gain.append(0.5)
    else:
        rospy.loginfo("Running Simple IK Service Client example.")

    try:
        rospy.wait_for_service(ns, 5.0)
        resp = iksvc(ikreq)
    except (rospy.ServiceException, rospy.ROSException), e:
        rospy.logerr("Service call failed: %s" % (e, ))
        return False
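
A short usage sketch for the client above; the node name and target pose below are illustrative, not from the original:

import rospy
from geometry_msgs.msg import Pose, Point, Quaternion

if __name__ == '__main__':
    rospy.init_node('ik_client_example')  # hypothetical node name
    target = Pose(position=Point(x=0.45, y=0.16, z=0.21),
                  orientation=Quaternion(x=0.0, y=1.0, z=0.0, w=0.0))
    ik_service_client('right', target, use_advanced_options=False)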
Example #23
            pitch_deg = pitch_deg + 360.0
        pitch = pitch_deg * degrees2rad

        roll_deg = float(words[2])
        if roll_deg > 180.0:
            roll_deg = roll_deg - 360.0
        if roll_deg < -180.0:
            roll_deg = roll_deg + 360.0
        roll = roll_deg * degrees2rad

    if pub_mode == "true":
        output_txt = "roll : " + str(round(roll, 2)) + ", "
        output_txt += "pitch : " + str(round(pitch, 2)) + ", "
        output_txt += "yaw : " + str(round(yaw, 2))

        marker.header = Header(stamp=rospy.Time.now(), frame_id="map")
        marker.text = output_txt

        marker_pub.publish(marker)

    q = quaternion_from_euler(roll, pitch, yaw)
    imuMsg.orientation.x = q[0]
    imuMsg.orientation.y = q[1]
    imuMsg.orientation.z = q[2]
    imuMsg.orientation.w = q[3]

    imuMsg.header.stamp = rospy.Time.now()

    imuMsg.header.seq = seq
    seq = seq + 1
    pub.publish(imuMsg)
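
The repeated wrap-to-[-180, 180] pattern in the fragment above can be reduced to one modular expression; a one-line sketch (the helper name is an assumption):

def wrap_deg(angle_deg):
    # Normalize any angle in degrees to the interval [-180, 180).
    return (angle_deg + 180.0) % 360.0 - 180.0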
def main():

    rospy.init_node('grasp_detector', anonymous=True)
    cloud_msg = rospy.wait_for_message('/transformed_cloud_camLink', PointCloud2)
    cloud_arr = ros_numpy.point_cloud2.pointcloud2_to_xyz_array(cloud_msg)


    ## Segment out the points that fit the table plane
    cloud_seg = pcl_helper.plane_seg(cloud_arr)
    cloud_arr = cloud_seg.to_array()
    cloud_colors_arr = 100*np.ones((cloud_arr.shape[0],3))

    ## output data info ##
    '''
    print('Total number of points: %s', cloud_arr.shape)
    print('HighestPoint %s', np.nanmax(cloud_arr[:,2]))
    print('LowestPoint %s', np.nanmin(cloud_arr[:,2]))
    print('Farthest Point %s', np.nanmax(cloud_arr[:,0]))
    print('Nearest Point %s', np.nanmin(cloud_arr[:,0]))
    #print('Total number of points above the table: %s', pc.shape)
    '''

    ## Cluster the pointcloud into various target objects ##
    clc = cloud_clustering(cloud_seg)
    clc.euclid_cluster()
    all_grasps = []
    all_scores = []
    all_labels = np.array(([]))

    print(str(len(clc.clusters))+' clusters detected in the input cloud...')

    ## Run inference for each cluster
    for i, cloud in enumerate(clc.clusters):
        points = cloud.to_array()

        ## Grasp-Estimation ##

        print('Estimating Grasps on cluster ' +str(i+1))
        latents = estimator.sample_latents()
        generated_grasps, generated_scores, _ = estimator.predict_grasps(
            sess,
            points,
            latents,
            num_refine_steps=cfg.num_refine_steps,
        )

        print(str(len(generated_scores)) + ' grasps generated')



        scores_np = np.asarray(generated_scores)
        sorting = np.argsort(-scores_np)
        sorted_scores = scores_np[sorting]

        grasps_np = np.asarray(generated_grasps)
        sorted_grasps = grasps_np[sorting]


        all_grasps += sorted_grasps.tolist()  # grasps are already sorted per object
        all_scores += sorted_scores.tolist()
        all_labels = np.concatenate([all_labels, (i+1)*np.ones(sorted_grasps.shape[0])])
        print(all_scores)

    #clc.cluster_mask()
    #clc.cluster_publish()          #Publish clusters as clouds
    print(len(all_scores))
    print(type(all_grasps))

    #mlab.figure(bgcolor=(1,1,1))
    #print(generated_scores)

    ## Sorting all grasps together ##
    scores_np = np.asarray(all_scores)
    sorting = np.argsort(-scores_np)    #Negative sign is for descending-order sorting
    sorted_scores = scores_np[sorting]
    sorted_labels = all_labels[sorting]

    grasps_np = np.asarray(all_grasps)
    sorted_grasps = grasps_np[sorting]
    filtered_grasps, filtered_scores, grasp_msgs = filter_grasps(sorted_grasps)
    #filtered_grasps =  sorted_grasps#[filter]
    #filtered_scores =  sorted_scores#[filter]
    print ('Filtered Grasps: '+str(filtered_scores.shape[0]))

    pub.publish(PoseArray(header=Header(stamp=rospy.Time.now(), frame_id='panda_camera_link'), poses = grasp_msgs))


    if len(all_scores)!=0:
        draw_scene(
                    cloud_arr,
                    pc_color=cloud_colors_arr,
                    grasps= filtered_grasps.tolist(),
                    grasp_scores= filtered_scores.tolist())
    else:
        draw_scene(
                cloud_arr,
                pc_color = cloud_colors_arr
            )



    mlab.show()
def main():
    userID = systemArgHandler()

    global mylimb

    rospy.init_node('lift_ik_prototype', anonymous=True)
    """
	myps = PoseStamped(
		header=Header(stamp=rospy.Time.now(), frame_id='base'),
		pose=pose2(init_pos),
	)
	solve_move_trac(mylimb, myps)
	"""

    rate = rospy.Rate(5000.0)
    #main loop
    while not rospy.is_shutdown():
        try:
            #defines the pose of the child from the parent
            buffUserLeft = tf_buffer.lookup_transform(
                'cob_body_tracker/user_' + str(userID) + '/left_shoulder',
                'cob_body_tracker/user_' + str(userID) + '/left_hand',
                rospy.Time())
            #get translations
            x = buffUserLeft.transform.translation.x
            y = buffUserLeft.transform.translation.y
            z = buffUserLeft.transform.translation.z

            #translate the translations
            tranRightX = -z * 1.2 + 0.2
            tranRightY = x * 1.5 - 0.28
            tranRightZ = -y * 1.1 + 0.40
    #tranRightX = -z * 1.2
    #tranRightY =  x * 1.2
    #tranRightZ = -y * 0.9

    #catch and continue after refresh rate
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            rate.sleep()
            continue

        user_pos = Vectors.V4D(
            tranRightX,  #depth (z?) (inline depth 0.2) (limit 1.0)
            tranRightY,  #left-right (x?) (inline 0.28) (limit 1.0)
            tranRightZ,
            0)  #height (y?) (inline height 0.40)

        #user_rot = Vectors.V4D(math.pi/2,
        #	((math.atan2(y,x) + math.pi/2) % (math.pi * 2)) - 10*math.pi/180,
        #	0, 0)
        user_rot = Vectors.V4D(0, math.pi / 2, 0, 0)

        leftArmPose = PoseStamped(
            header=Header(stamp=rospy.Time.now(), frame_id='base'),
            #header=Header(stamp=rospy.Time.now(), frame_id=mylimb+'_arm_mount'),
            #header=Header(stamp=rospy.Time.now(), frame_id=mylimb+'_lower_shoulder'),
            pose=userPose(user_pos, user_rot),
        )

        solve_move_trac(mylimb, leftArmPose)

        #Refresh rate
        rate.sleep()
def compute_grasps_handle(req):
    tf_buffer = tf2_ros.Buffer(rospy.Duration(1200.0))
    tf_listener = tf2_ros.TransformListener(tf_buffer)
    print("Request for Grasp compute recieved by graspnet!!")
    #return AddTwoIntsResponse(req.a + req.b)

    ## Subscribers ##
    #camera_info_topic = '/panda/camera/depth/camera_info'
    #depth_img_topic = '/panda/camera/depth/image_raw'
    #rgb_img_topic ='/panda/camera/color/image_raw'
    cloud_topic = '/transformed_cloud_camLink'

    cloud_msg = rospy.wait_for_message(cloud_topic, PointCloud2)
    pc = ros_numpy.point_cloud2.pointcloud2_to_xyz_array(cloud_msg)
    pc_colors = 100*np.ones((pc.shape[0],3))

    ## output data info ##
    print('Total number of points: %s', pc.shape)
    print('HighestPoint %s', np.nanmax(pc[:,2]))
    print('LowestPoint %s', np.nanmin(pc[:,2]))
    print('Farthest Point %s', np.nanmax(pc[:,0]))
    print('Nearest Point %s', np.nanmin(pc[:,0]))
    #print('Total number of points above the table: %s', pc.shape)

    # Smoothed pc comes from averaging the depth for 10 frames and removing
    # the pixels with jittery depth between those 10 frames.
    #object_pc = data['smoothed_object_pc']

    #points_obj = xyzrgb_array_to_pointcloud2(object_pc, pc_colors, stamp = rospy.Time.now(), frame_id = 'world')
    #pub2.publish(points_obj)
    ## Segment out the points that fit the table plane
    cloud_seg = pcl_helper.plane_seg(pc)
    #cloud_arr = cloud_seg.to_array()
    #cloud_colors_arr =100*np.ones((cloud_arr.shape[0],3))

    clc = cloud_clustering(cloud_seg)
    clc.euclid_cluster()
    cloud = clc.clusters[0]     # Run grasp-detection only on the closest cloud cluster

    points = cloud.to_array()
    cloud_colors = 100*np.ones((points.shape[0],3))
    ## Grasp-Estimation ##

    latents = estimator.sample_latents()
    generated_grasps, generated_scores, _ = estimator.predict_grasps(
        sess,
        points,
        latents,
        num_refine_steps=cfg.num_refine_steps,
    )


    ## Sorting grasps ##
    scores_np = np.asarray(generated_scores)
    sorting = np.argsort(-scores_np)    #Negative sign is for descending-order sorting
    sorted_scores = scores_np[sorting]

    grasps_np = np.asarray(generated_grasps)
    sorted_grasps = grasps_np[sorting]
    print('Total generated grasps '+str(len(sorted_grasps.tolist())))
    filtered_grasps, filtered_scores, grasp_msgs = filter_grasps(sorted_grasps)
    print('Filtered grasps '+str(len(filtered_grasps.tolist())))
    try:
        tf_stamped = tf_buffer.lookup_transform('world', 'panda_camera_link' , rospy.Time(0))

    except Exception as inst:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print('exception: '+str(inst)+' in '+ str(exc_tb.tb_lineno))

    # Transform the grasps to world frame - which is the ref. frame for Panda robot
    transformed_grasps = transform_poses(filtered_grasps.tolist(),tf_stamped)

    # filter with a list comprehension: calling pop(i) inside enumerate would
    # skip the element that follows each removed grasp
    transformed_grasps = [g for g in transformed_grasps
                          if g.position.z >= 0.45]  # predefined table height

    print('Transformed grasps above the table '+str(len(transformed_grasps)))
    points_obj = pcl_helper.xyzrgb_array_to_pointcloud2(points, cloud_colors, stamp = rospy.Time.now(), frame_id = 'panda_camera_link')
    pub2.publish(points_obj)
    pub.publish(PoseArray(header=Header(stamp=rospy.Time.now(), frame_id='world'), poses = transformed_grasps))
    return PoseArray(header=Header(stamp=rospy.Time.now(), frame_id='world'), poses = transformed_grasps)
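
transform_poses is not shown in this snippet. A plausible sketch using tf2_geometry_msgs.do_transform_pose, under the assumption that the inputs are geometry_msgs/Pose messages (if the estimator yields 4x4 matrices they would need converting to Pose first):

from geometry_msgs.msg import PoseStamped
import tf2_geometry_msgs

def transform_poses(poses, tf_stamped):
    # Apply one TransformStamped to each Pose and return the transformed
    # Pose objects; header bookkeeping is kept minimal for the sketch.
    out = []
    for pose in poses:
        ps = PoseStamped()
        ps.pose = pose
        out.append(tf2_geometry_msgs.do_transform_pose(ps, tf_stamped).pose)
    return out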
Example #27
    def publish_perspectives(self):
        header = Header()
        header.frame_id = self.reference_frame
        header.stamp = rospy.Time()
        perspective_array_stamped = PerspectiveArrayStamped()
        perspective_array_stamped.header = header
        cpt = 0
        rospy.logdebug("%s perspectives to send" %
                       str(len(self.visible_nodes)))
        for agent_id, visible_nodes in self.visible_nodes.items():
            perspective_array_stamped.perspectives.append(Perspective())
            perspective_array_stamped.perspectives[cpt].agent_id = str(
                agent_id)
            perspective_array_stamped.perspectives[cpt].agent_name = str(
                self.source.scene.nodes[agent_id].name)
            for node in visible_nodes:
                if self.isobject(self.source.scene, node):
                    obj_msg = Object()
                    obj_msg.id = node.id
                    obj_msg.name = node.name
                    t = translation_from_matrix(
                        get_world_transform(self.source.scene, node))
                    q = quaternion_from_matrix(
                        get_world_transform(self.source.scene, node))
                    pose = Pose()
                    pose.position.x = t[0]
                    pose.position.y = t[1]
                    pose.position.z = t[2]
                    pose.orientation.x = q[0]
                    pose.orientation.y = q[1]
                    pose.orientation.z = q[2]
                    pose.orientation.w = q[3]
                    pose_stamped = PoseStamped(header, pose)
                    obj_msg.pose = pose_stamped
                    perspective_array_stamped.perspectives[
                        cpt].objects_seen.append(obj_msg)

                if node.type == CAMERA:
                    agent_msg = Agent()
                    agent_msg.id = node.id
                    agent_msg.name = node.name
                    t = translation_from_matrix(
                        get_world_transform(self.source.scene, node))
                    q = quaternion_from_matrix(
                        get_world_transform(self.source.scene, node))
                    pose = Pose()
                    pose.position.x = t[0]
                    pose.position.y = t[1]
                    pose.position.z = t[2]
                    pose.orientation.x = q[0]
                    pose.orientation.y = q[1]
                    pose.orientation.z = q[2]
                    pose.orientation.w = q[3]
                    pose_stamped = PoseStamped(header, pose)
                    agent_msg.head_gaze_pose = pose_stamped
                    agent_bodies = []
                    for child_id in node.children:
                        child_node = self.source.scene.nodes[child_id]  # do not shadow the outer loop variable
                        object_msg = Object()
                        object_msg.id = child_node.id
                        object_msg.name = child_node.name
                        t = translation_from_matrix(
                            get_world_transform(self.source.scene, child_node))
                        q = quaternion_from_matrix(
                            get_world_transform(self.source.scene, child_node))
                        pose = Pose()
                        pose.position.x = t[0]
                        pose.position.y = t[1]
                        pose.position.z = t[2]
                        pose.orientation.x = q[0]
                        pose.orientation.y = q[1]
                        pose.orientation.z = q[2]
                        pose.orientation.w = q[3]
                        pose_stamped = PoseStamped(header, pose)
                        object_msg.pose = pose_stamped
                        agent_bodies.append(object_msg)
                    agent_msg.agent_bodies = agent_bodies
                    perspective_array_stamped.perspectives[
                        cpt].agents_seen.append(agent_msg)
            cpt += 1

        self.ros_publishers["perspectives"].publish(perspective_array_stamped)
        rospy.logdebug(
            "[perspective_filter] %s perspectives published (expected %s)" %
            (str(len(perspective_array_stamped.perspectives)), cpt))
Example #28
        return math.atan2(local_frame_goal.y, local_frame_goal.x)

    def convert_to_local_frame(self, stamped_point):
        self.tf_listener.waitForTransform("/base_link",
                                          stamped_point.header.frame_id,
                                          rospy.Time(), rospy.Duration(4))
        return self.tf_listener.transformPoint("/base_link", stamped_point)

    def go_to(self, destination):
        local_frame_goal = self.convert_to_local_frame(destination).point
        while self.euclidean_distance(local_frame_goal) > 0.05:
            local_frame_goal = self.convert_to_local_frame(destination).point

            vel_msg = Twist()
            vel_msg.linear.x = self.euclidean_distance(local_frame_goal)
            vel_msg.angular.z = self.angular_velocity(local_frame_goal)
            self.velocity_publisher.publish(vel_msg)

            self.rate.sleep()


if __name__ == "__main__":
    try:
        robot = AToB()
        destination = PointStamped(header=Header(stamp=rospy.Time.now(),
                                                 frame_id="/odom"),
                                   point=Point(-12.9481, 22.9615, 0.0))
        robot.go_to(destination)
    except rospy.ROSInterruptException:
        pass
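
go_to above is effectively a proportional controller with unit gains and no speed limit, so large initial errors command large velocities. A capped variant is the usual safeguard on real robots; a sketch with illustrative gains and limits:

def capped_velocity(distance_m, angle_rad,
                    k_lin=0.5, k_ang=1.0, max_lin=0.3, max_ang=1.0):
    # Proportional control with saturation on both channels.
    lin = max(-max_lin, min(max_lin, k_lin * distance_m))
    ang = max(-max_ang, min(max_ang, k_ang * angle_rad))
    return lin, ang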
Example #29
	def spin_once(self):

		def quat_from_orient(orient):
			'''Build a quaternion from orientation data.'''
			try:
				w, x, y, z = orient['quaternion']
				return (x, y, z, w)
			except KeyError:
				pass
			try:
				return quaternion_from_euler(pi*orient['roll']/180.,
						pi*orient['pitch']/180, pi*orient['yaw']/180.)
			except KeyError:
				pass
			try:
				m = identity_matrix()
				m[:3,:3] = orient['matrix']
				return quaternion_from_matrix(m)
			except KeyError:
				pass

		# get data
		data = self.mt.read_measurement()
		# common header
		h = Header()
		h.stamp = rospy.Time.now()
		h.frame_id = self.frame_id

		# get data (None if not present)
		temp = data.get('Temp')	# float
		raw_data = data.get('RAW')
		imu_data = data.get('Calib')
		orient_data = data.get('Orient')
		velocity_data = data.get('Vel')
		position_data = data.get('Pos')
		rawgps_data = data.get('RAWGPS')
		status = data.get('Stat')	# int

		# create messages and default values
		imu_msg = Imu()
		imu_msg.orientation_covariance = (-1., )*9
		imu_msg.angular_velocity_covariance = (-1., )*9
		imu_msg.linear_acceleration_covariance = (-1., )*9
		pub_imu = False
		gps_msg = NavSatFix()
		xgps_msg = GPSFix()
		pub_gps = False
		vel_msg = TwistStamped()
		pub_vel = False
		mag_msg = Vector3Stamped()
		pub_mag = False
		temp_msg = Float32()
		pub_temp = False

		# fill information where it's due
		# start by raw information that can be overriden
		if raw_data: # TODO warn about data not calibrated
			pub_imu = True
			pub_vel = True
			pub_mag = True
			pub_temp = True
			# acceleration
			imu_msg.linear_acceleration.x = raw_data['accX']
			imu_msg.linear_acceleration.y = raw_data['accY']
			imu_msg.linear_acceleration.z = raw_data['accZ']
			imu_msg.linear_acceleration_covariance = (0., )*9
			# gyroscopes
			imu_msg.angular_velocity.x = raw_data['gyrX']
			imu_msg.angular_velocity.y = raw_data['gyrY']
			imu_msg.angular_velocity.z = raw_data['gyrZ']
			imu_msg.angular_velocity_covariance = (0., )*9
			vel_msg.twist.angular.x = raw_data['gyrX']
			vel_msg.twist.angular.y = raw_data['gyrY']
			vel_msg.twist.angular.z = raw_data['gyrZ']
			# magnetometer
			mag_msg.vector.x = raw_data['magX']
			mag_msg.vector.y = raw_data['magY']
			mag_msg.vector.z = raw_data['magZ']
			# temperature
			# two's-complement decoding with 1/256 degC resolution
			x = raw_data['temp']
			if x & 0x8000:
				temp_msg.data = (x - (1 << 16))/256.
			else:
				temp_msg.data = x/256.
		if rawgps_data:
			if rawgps_data['bGPS']<self.old_bGPS:
				pub_gps = True
				# LLA
				xgps_msg.latitude = gps_msg.latitude = rawgps_data['LAT']*1e-7
				xgps_msg.longitude = gps_msg.longitude = rawgps_data['LON']*1e-7
				xgps_msg.altitude = gps_msg.altitude = rawgps_data['ALT']*1e-3
				# NED vel # TODO?
				# Accuracy
				# 2 is there to go from std_dev to 95% interval
				xgps_msg.err_horz = 2*rawgps_data['Hacc']*1e-3
				xgps_msg.err_vert = 2*rawgps_data['Vacc']*1e-3
			self.old_bGPS = rawgps_data['bGPS']
		if temp is not None:
			pub_temp = True
			temp_msg.data = temp
		if imu_data:
			try:
				x = imu_data['gyrX']
				y = imu_data['gyrY']
				z = imu_data['gyrZ']

				v = numpy.array([x, y, z])
				v = v.dot(self.R)

				imu_msg.angular_velocity.x = v[0]
				imu_msg.angular_velocity.y = v[1]
				imu_msg.angular_velocity.z = v[2]
				imu_msg.angular_velocity_covariance = (radians(0.025), 0., 0., 0.,
						radians(0.025), 0., 0., 0., radians(0.025))
				pub_imu = True
				vel_msg.twist.angular.x = v[0]
				vel_msg.twist.angular.y = v[1]
				vel_msg.twist.angular.z = v[2]
				pub_vel = True
			except KeyError:
				pass
			try:
				x = imu_data['accX']
				y = imu_data['accY']
				z = imu_data['accZ']

				v = numpy.array([x, y, z])
				v = v.dot(self.R)

				imu_msg.linear_acceleration.x = v[0]
				imu_msg.linear_acceleration.y = v[1]
				imu_msg.linear_acceleration.z = v[2]
				imu_msg.linear_acceleration_covariance = (0.0004, 0., 0., 0.,
						0.0004, 0., 0., 0., 0.0004)
				pub_imu = True
			except KeyError:
				pass
			try:
				x = imu_data['magX']
				y = imu_data['magY']
				z = imu_data['magZ']

				v = numpy.array([x, y, z])
				v = v.dot(self.R)

				mag_msg.vector.x = v[0]
				mag_msg.vector.y = v[1]
				mag_msg.vector.z = v[2]
				pub_mag = True
			except KeyError:
				pass
		if velocity_data:
			pub_vel = True
			vel_msg.twist.linear.x = velocity_data['Vel_X']
			vel_msg.twist.linear.y = velocity_data['Vel_Y']
			vel_msg.twist.linear.z = velocity_data['Vel_Z']
		if orient_data:
			pub_imu = True
			orient_quat = quat_from_orient(orient_data)
			imu_msg.orientation.x = orient_quat[0]
			imu_msg.orientation.y = orient_quat[1]
			imu_msg.orientation.z = orient_quat[2]
			imu_msg.orientation.w = orient_quat[3]
			imu_msg.orientation_covariance = (radians(1.), 0., 0., 0.,
					radians(1.), 0., 0., 0., radians(9.))
		if position_data:
			pub_gps = True
			xgps_msg.latitude = gps_msg.latitude = position_data['Lat']
			xgps_msg.longitude = gps_msg.longitude = position_data['Lon']
			xgps_msg.altitude = gps_msg.altitude = position_data['Alt']
		if status is not None:
			if status & 0b0001:
				self.stest_stat.level = DiagnosticStatus.OK
				self.stest_stat.message = "Ok"
			else:
				self.stest_stat.level = DiagnosticStatus.ERROR
				self.stest_stat.message = "Failed"
			if status & 0b0010:
				self.xkf_stat.level = DiagnosticStatus.OK
				self.xkf_stat.message = "Valid"
			else:
				self.xkf_stat.level = DiagnosticStatus.WARN
				self.xkf_stat.message = "Invalid"
			if status & 0b0100:
				self.gps_stat.level = DiagnosticStatus.OK
				self.gps_stat.message = "Ok"
			else:
				self.gps_stat.level = DiagnosticStatus.WARN
				self.gps_stat.message = "No fix"
			self.diag_msg.header = h
			self.diag_pub.publish(self.diag_msg)

			if pub_gps:
				if status & 0b0100:
					gps_msg.status.status = NavSatStatus.STATUS_FIX
					xgps_msg.status.status = GPSStatus.STATUS_FIX
					gps_msg.status.service = NavSatStatus.SERVICE_GPS
					xgps_msg.status.position_source = 0b01101001
					xgps_msg.status.motion_source = 0b01101010
					xgps_msg.status.orientation_source = 0b01101010
				else:
					gps_msg.status.status = NavSatStatus.STATUS_NO_FIX
					xgps_msg.status.status = GPSStatus.STATUS_NO_FIX
					gps_msg.status.service = 0
					xgps_msg.status.position_source = 0b01101000
					xgps_msg.status.motion_source = 0b01101000
					xgps_msg.status.orientation_source = 0b01101000
		# publish available information
		if pub_imu:
			imu_msg.header = h
			self.imu_pub.publish(imu_msg)
		if pub_gps:
			xgps_msg.header = gps_msg.header = h
			self.gps_pub.publish(gps_msg)
			self.xgps_pub.publish(xgps_msg)
		if pub_vel:
			vel_msg.header = h
			self.vel_pub.publish(vel_msg)
		if pub_mag:
			mag_msg.header = h
			self.mag_pub.publish(mag_msg)
		if pub_temp:
			self.temp_pub.publish(temp_msg)
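
The temperature decode in spin_once is a two's-complement interpretation of a 16-bit register at 1/256 degC resolution; the same idea as a standalone helper (the name is an assumption). Note the parentheses around (1 << 16): since << binds more loosely than -, writing x - 1 << 16 would shift (x - 1) instead.

def decode_q8(raw):
    # Interpret a 16-bit two's-complement value with 1/256 resolution.
    if raw & 0x8000:
        raw -= (1 << 16)
    return raw / 256.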
class Recognizer(object):
    def __init__(self, model, cascade_filename, run_local, wait, rp):
        self.rp = rp
        self.wait = wait
        self.doRun = True
        self.model = model
        self.restart = False
        self.ros_restart_request = False
        self.detector = CascadedDetector(cascade_fn=cascade_filename,
                                         minNeighbors=5,
                                         scaleFactor=1.1)
        if run_local:
            print ">> Error: Run local selected in ROS based Recognizer"
            sys.exit(1)
        else:
            self.bridge = CvBridge()

        def signal_handler(signal, frame):
            print ">> ROS Exiting"
            self.doRun = False

        signal.signal(signal.SIGINT, signal_handler)

    def image_callback(self, ros_data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(ros_data, "bgr8")
        except Exception, ex:
            print ex
            return
        # Resize the frame to half the original size for speeding up the detection process.
        # In ROS we can control the size, so we are sending a 320*240 image by default.
        img = cv2.resize(cv_image, (320, 240), interpolation=cv2.INTER_CUBIC)
        # img = cv2.resize(cv_image, (cv_image.shape[1] / 2, cv_image.shape[0] / 2), interpolation=cv2.INTER_CUBIC)
        # img = cv_image
        imgout = img.copy()
        # Remember the Persons found in current image
        persons = []
        for _i, r in enumerate(self.detector.detect(img)):
            x0, y0, x1, y1 = r
            # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
            face = img[y0:y1, x0:x1]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
            face = cv2.resize(face,
                              self.model.image_size,
                              interpolation=cv2.INTER_CUBIC)
            prediction = self.model.predict(face)
            predicted_label = prediction[0]
            classifier_output = prediction[1]
            # Now get the distance, assuming a 1-Nearest Neighbor classifier.
            # Since it's a 1-Nearest Neighbor, just take the zero-th element:
            distance = classifier_output['distances'][0]
            # Draw the face area in image:
            cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 0, 255), 2)
            # Draw the predicted name (folder name...):
            draw_str(imgout, (x0 - 20, y0 - 40),
                     "Label " + self.model.subject_names[predicted_label])
            draw_str(imgout, (x0 - 20, y0 - 20),
                     "Feature Distance " + "%1.1f" % distance)
            msg = Person()
            point = Point()
            # Send the center of the person's bounding box
            mid_x = float(x0 + (x1 - x0) * 0.5)
            mid_y = float(y0 + (y1 - y0) * 0.5)
            point.x = mid_x
            point.y = mid_y
            # Z is "mis-used" to represent the size of the bounding box
            point.z = x1 - x0
            msg.position = point
            msg.name = str(self.model.subject_names[predicted_label])
            msg.reliability = float(distance)
            persons.append(msg)
        if len(persons) > 0:
            h = Header()
            h.stamp = rospy.Time.now()
            h.frame_id = '/ros_cam'
            msg = People()
            msg.header = h
            for p in persons:
                msg.people.append(p)
            self.rp.publisher.publish(msg)
        cv2.imshow('OCVFACEREC < ROS STREAM', imgout)
        cv2.waitKey(self.wait)

        try:
            z = self.ros_restart_request
            if z:
                self.restart = True
                self.doRun = False
        except Exception, e:
            pass