Example #1
 def getEnv(self, key, iter=0, first=False):
     
     val = self.base.getModel(key)
     
     print key, val
     
     obj = None
     obj_list = []
     parent_tf = tf.identity_matrix()
     
     if self.base.typeOf(key) == self.base.ROBOTS.ident:
         obj = self._getRobot(key)
         obj_list.append(obj)
         parent_tf = obj.GetTransform()
     
     elif self.base.typeOf(key) == self.base.OBJECTS.ident:
         obj = self._getObject(key)
         obj_list.append(obj)
         parent_tf = obj.GetTransform()
             
     elif self.base.typeOf(key) == self.base.LOCATIONS.ident:
         obj = self._getLocation(key)
         obj_list.append(obj)
         parent_tf = obj.GetTransform()
         
     elif self.base.typeOf(key) == self.base.SENSORS.ident:
         obj = self._getSensor(key)
         obj_list.append(obj)
         parent_tf = obj.GetTransform()
         
     else:
         if val.has_key("translation"):
             translation = map(lambda x: float(x), val["translation"].split(" "))
             parent_tf = tf.concatenate_matrices(parent_tf, tf.compose_matrix(translate = translation))
         if val.has_key("quat"):
             quat = map(lambda x: float(x), val["quat"].split(" "))
             rot = rave.axisAngleFromQuat(quat)
             m2 = tf.compose_matrix(angles = rot)
             parent_tf = tf.concatenate_matrices(parent_tf, m2)     
     
     if first:
         parent_tf = tf.identity_matrix()
         if obj is not None:
             obj.SetTransform(parent_tf)
     
     if iter==0:
         return obj_list            
     
     # search for children (entries whose 'base' points at this key)
     child_expr   = pycassa.create_index_expression('base', key)
     child_clause = pycassa.create_index_clause([child_expr])
     
     for child_key, _ in self.base.col.get_indexed_slices(child_clause):
         child_objs = self.getEnv(child_key, iter-1)
         for child in child_objs:
             if child is not None:
                 child.SetTransform(tf.concatenate_matrices(parent_tf, child.GetTransform()))
                 obj_list.append(child)
         
     return obj_list
Example #2
    def motion_model(self, f0, f1, stamp, use_kalman=False):
        if not use_kalman:
            # simple `repetition` (constant-velocity) model:
            # Tv = T1 * inv(T0) is the frame-to-frame motion; predict T2 = Tv * T1
            txn0, rxn0 = f0['pose'][L_POS], f0['pose'][A_POS]
            txn1, rxn1 = f1['pose'][L_POS], f1['pose'][A_POS]
            R0 = tx.euler_matrix(*rxn0)
            R1 = tx.euler_matrix(*rxn1)

            T0 = tx.compose_matrix(angles=rxn0, translate=txn0)
            T1 = tx.compose_matrix(angles=rxn1, translate=txn1)

            Tv = np.dot(T1, vm.inv(T0))  # Tv * T0 = T1
            T2 = np.dot(Tv, T1)

            txn = tx.translation_from_matrix(T2)
            rxn = tx.euler_from_matrix(T2)

            x = f1['pose'].copy()
            P = f1['cov'].copy()
            x[0:3] = txn
            x[9:12] = rxn
            return x, P
        else:
            # dt MUST NOT BE None
            self.kf_.x = f0['pose']
            self.kf_.P = f0['cov']
            dt = (f1['stamp'] - f0['stamp'])
            self.kf_.predict(dt)

            txn, rxn = f1['pose'][L_POS], f1['pose'][A_POS]
            z = np.concatenate([txn, rxn])
            self.kf_.update(z)
            dt = (stamp - f1['stamp'])
            self.kf_.predict(dt)
            return self.kf_.x.copy(), self.kf_.P.copy()
    def pose_callback(self, msg):

        print "pose cb"
        trans = [msg.pose.position.x, msg.pose.position.y, msg.pose.position.z]
        rot = [
            msg.pose.orientation.x, msg.pose.orientation.y,
            msg.pose.orientation.z, msg.pose.orientation.w
        ]
        pose_matrix = tr.compose_matrix(angles=tr.euler_from_quaternion(rot),
                                        translate=trans)
        # Transfer to the frame of the robot
        # pose_matrix = np.dot(self.config, pose_matrix)

        if self.previous_tf is None:  #if first callback
            odo_trans = trans
            odo_rot = rot
            odo = tr.compose_matrix(angles=tr.euler_from_quaternion(odo_rot),
                                    translate=odo_trans)

            self.origin = trans
        else:
            # calculate limit matrix
            if enable_filter:
                limit_dist = limit_speed * (msg.header.stamp.to_nsec() -
                                            self.previous_time.to_nsec())
                print(msg.header.stamp.to_nsec() -
                      self.previous_time.to_nsec())
                scale, shear, angles, prev_trans, persp = tr.decompose_matrix(
                    self.previous_tf)
                moved_vec = [
                    msg.pose.position.x - prev_trans[0],
                    msg.pose.position.y - prev_trans[1],
                    msg.pose.position.z - prev_trans[2]
                ]
                moved_dist = np.linalg.norm(moved_vec)
                if moved_dist > limit_dist:
                    #discard this pose
                    print "move too much"
                    return

            odo = np.dot(tr.inverse_matrix(self.previous_tf), pose_matrix)
            odo_trans = tf.transformations.translation_from_matrix(odo)
            odo_rot = tf.transformations.quaternion_from_matrix(odo)
        self.previous_time = msg.header.stamp
        self.previous_tf = pose_matrix

        print "x: ", trans[0] - self.origin[0], "y: ", trans[1] - self.origin[
            1], "z: ", trans[2] - self.origin[2]

        robot_odo = PoseStamped()
        robot_odo.header.stamp = msg.header.stamp
        robot_odo.pose.position.x = odo_trans[0]
        robot_odo.pose.position.y = odo_trans[1]
        robot_odo.pose.position.z = odo_trans[2]
        robot_odo.pose.orientation.x = odo_rot[0]
        robot_odo.pose.orientation.y = odo_rot[1]
        robot_odo.pose.orientation.z = odo_rot[2]
        robot_odo.pose.orientation.w = odo_rot[3]

        self.pub_odom.publish(robot_odo)
Example #4
def updateStoredTransforms(update):
    global holoTransformList
    global markerTransformList
    global worldTransform
    global offsetTransform

    arrSize = holoTransformList.shape[0]

    #parse input into meaningful values
    xworldT = update[0:3]
    xworldR = update[3:6]
    xworldRQ = TR.quaternion_about_axis(np.sqrt(xworldR.dot(xworldR)), xworldR)
    xoffsetT = update[6:9]
    xoffsetR = update[9:12]
    xoffsetRQ = TR.quaternion_about_axis(np.sqrt(xoffsetR.dot(xoffsetR)),
                                         xoffsetR)

    #update world transform
    worldIncrement = TR.compose_matrix(
        translate=xworldT, angles=TR.euler_from_quaternion(xworldRQ))
    worldTransform = np.dot(worldIncrement, worldTransform)

    #update offset transform
    offsetIncrement = TR.compose_matrix(
        translate=xoffsetT, angles=TR.euler_from_quaternion(xoffsetRQ))
    offsetTransform = np.dot(offsetIncrement, offsetTransform)
    def point_cb(self, msg):
        print "point cb"
        trans = [
            self.pose_msg.pose.position.x / 1000,
            self.pose_msg.pose.position.y / 1000,
            self.pose_msg.pose.position.z / 1000
        ]
        rot = [
            self.pose_msg.pose.orientation.x, self.pose_msg.pose.orientation.y,
            self.pose_msg.pose.orientation.z, self.pose_msg.pose.orientation.w
        ]
        pose_matrix = tr.compose_matrix(angles=tr.euler_from_quaternion(rot),
                                        translate=trans)

        pt_trans = [msg.point.x, msg.point.y, msg.point.z]
        pt_rot = [0, 0, 0, 1]
        pt_matrix = tr.compose_matrix(angles=tr.euler_from_quaternion(pt_rot),
                                      translate=pt_trans)

        obj_pose = np.dot(pose_matrix, pt_matrix)
        obj_position = tf.transformations.translation_from_matrix(obj_pose)
        print "pose = ", self.pose_msg.pose
        print "point = ", msg.point
        print "obj position = ", obj_position
        point_msg = PointStamped()
        point_msg.header.stamp = rospy.Time()
        point_msg.header.seq = msg.header.seq
        point_msg.header.frame_id = "map"
        point_msg.point.x = obj_position[0]
        point_msg.point.y = obj_position[1]
        point_msg.point.z = obj_position[2]
        self.pub_obj_position.publish(point_msg)
Example #6
    def addTransforms(self, frame1, frame2):
        """ Adds two transform objects to get a resulting transform. """

        t1 = np.array(
            [frame1.translation.x, frame1.translation.y, frame1.translation.z])
        t2 = np.array(
            [frame2.translation.x, frame2.translation.y, frame2.translation.z])

        r1 = np.array([
            frame1.rotation.x, frame1.rotation.y, frame1.rotation.z,
            frame1.rotation.w
        ])
        r2 = np.array([
            frame2.rotation.x, frame2.rotation.y, frame2.rotation.z,
            frame2.rotation.w
        ])

        f1 = t.compose_matrix(translate=t1, angles=t.euler_from_quaternion(r1))
        f2 = t.compose_matrix(translate=t2, angles=t.euler_from_quaternion(r2))

        f = np.matmul(f1, f2)

        trans = t.translation_from_matrix(f)
        rot = t.quaternion_from_matrix(f)

        trans = Vector3(trans[0], trans[1], trans[2])
        rot = Quaternion(rot[0], rot[1], rot[2], rot[3])

        return Transform(trans, rot)
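
A usage sketch for addTransforms (assumes geometry_msgs is available; `node` is a hypothetical instance of the enclosing class):

from geometry_msgs.msg import Transform, Vector3, Quaternion

a = Transform(Vector3(1.0, 0.0, 0.0), Quaternion(0.0, 0.0, 0.0, 1.0))
b = Transform(Vector3(0.0, 1.0, 0.0), Quaternion(0.0, 0.0, 0.0, 1.0))
# combined = node.addTransforms(a, b)  # `node` is a hypothetical instance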
def angle_diff_from_quaternions(q1, q2):
    tf1 = transformations.compose_matrix(
        angles=transformations.euler_from_quaternion(q1))
    tf2 = transformations.compose_matrix(
        angles=transformations.euler_from_quaternion(q2))
    angle, _, _ = transformations.rotation_from_matrix(
        np.dot(np.linalg.inv(tf1), tf2))
    return angle / np.pi * 180.
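
A quick sanity check of angle_diff_from_quaternions (a minimal sketch; assumes numpy and tf.transformations are imported as in the snippet above):

import numpy as np
from tf import transformations

q1 = transformations.quaternion_from_euler(0, 0, 0)
q2 = transformations.quaternion_from_euler(0, 0, np.pi / 4)
print(angle_diff_from_quaternions(q1, q2))  # approx. 45.0 degrees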
 def _get_tf_matrix(self, pose):
     if isinstance(pose, dict):
         _quat = np.array([pose['orientation'].x, pose['orientation'].y, pose['orientation'].z, pose['orientation'].w])
         _trans = np.array([pose['position'].x, pose['position'].y, pose['position'].z])
         return tr.compose_matrix(angles=tr.euler_from_quaternion(_quat,'sxyz'), translate=_trans)
     else:
         _quat = np.array([pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w])
         _trans = np.array([pose.position.x, pose.position.y, pose.position.z])
         return tr.compose_matrix(angles=tr.euler_from_quaternion(_quat,'sxyz'), translate=_trans)
Example #9
 def transform_pose_to_base_link(self, t, q):
     euler_pose = tfm.euler_from_quaternion(q)
     tf_cam_col_opt_fram = tfm.compose_matrix(translate=t,
                                              angles=euler_pose)
     trans, quat = self.listener.lookupTransform(
         'base_link', 'camera_color_optical_frame', rospy.Time(0))
     euler = tfm.euler_from_quaternion(quat)
     tf = tfm.compose_matrix(translate=trans, angles=euler)
     t_pose = np.dot(tf, tf_cam_col_opt_fram)
     t_ba_li = t_pose[0:3, 3]
     return t_ba_li
Example #10
 def transformOpenrave(self, openrave_obj, complex_data):
     if complex_data.has_key("translation"):
         translation = map(float, complex_data["translation"].split(" "))
         m1 = openrave_obj.GetTransform()
         m2 = tf.compose_matrix(translate=translation)
         openrave_obj.SetTransform(tf.concatenate_matrices(m1, m2))
     if complex_data.has_key("quat"):
         quat = map(float, complex_data["quat"].split(" "))
         m1 = openrave_obj.GetTransform()
         rot = rave.axisAngleFromQuat(quat)  # axis-angle vector, consumed by compose_matrix as Euler angles
         m2 = tf.compose_matrix(angles=rot)
         openrave_obj.SetTransform(tf.concatenate_matrices(m1, m2))
     return openrave_obj
Example #11
def controller():
    rospy.sleep(1)
    print("en el controler")
    k_v = 0.4
    k_w = 1
    x_goal = -3
    y_goal = 2
    theta_goal = 0
    while not rospy.is_shutdown():
        print("true goal:", x_goal, y_goal)
        (roll_bot, pitch_bot, yaw_bot) = euler_from_quaternion([
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w
        ])
        orient = np.array([roll_bot, pitch_bot, yaw_bot])
        trans = np.array([pose.position.x, pose.position.y, 0])
        T_bot = compose_matrix(angles=orient, translate=trans)
        T_bot_inv = np.linalg.inv(T_bot)

        orient = np.array([0, 0, 0])
        trans = np.array([x_goal, y_goal, 0])
        T_punto = compose_matrix(angles=orient, translate=trans)

        T_punto_bot = np.matmul(T_bot_inv, T_punto)
        punto_x = T_punto_bot[0][3]
        punto_y = T_punto_bot[1][3]
        print("goal from bot:", punto_x, punto_y)

        giro = atan2(punto_y, punto_x)
        print("angulo_a_girar: ", degrees(giro))
        print("\n")

        if ((sqrt((pose.position.x - x_goal)**2 +
                  (pose.position.y - y_goal)**2)) < 0.01):
            twist.linear.x = 0
            twist.angular.z = 0
        else:
            twist.angular.z = k_w * (giro)
            twist.linear.x = k_v * sqrt((pose.position.x - x_goal)**2 +
                                        (pose.position.y - y_goal)**2)

        if (twist.linear.x > 0.3):  # cap linear velocity
            twist.linear.x = 0.3
        if (twist.angular.z > 1):  # cap angular velocity
            twist.angular.z = 1
        if (twist.angular.z < (-1)):  # cap angular velocity
            twist.angular.z = -1

        twist_pub.publish(twist)
        print(twist)
        rate.sleep()
Example #12
def controller():
    rospy.sleep(1)
    print("en el controler")
    k_v = 1
    k_w = 1
    path = [[-0.6, 0], [-3.5, 0], [-3.5, 3.5], [1.5, 3.5], [1.5, -1.5], [3.5, -1.5],
            [3.5, -8.0], [-2.5, -8.0], [-2.5, -5.5], [1.5, -5.5], [1.5, -3.5], [-1, -3.5]]
    x_goal = path[0][0]
    y_goal = path[0][1]
    next = 0
    while not rospy.is_shutdown():
        print("true goal:", x_goal, y_goal)
        (roll_bot, pitch_bot, yaw_bot) = euler_from_quaternion([
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w
        ])
        trans = np.array([pose.position.x, pose.position.y, 0])
        orient = np.array([roll_bot, pitch_bot, yaw_bot])
        T_bot = compose_matrix(angles=orient, translate=trans)
        T_bot_inv = np.linalg.inv(T_bot)

        trans = np.array([x_goal, y_goal, 0])
        orient = np.array([0, 0, 0])
        T_punto = compose_matrix(angles=orient, translate=trans)

        T_punto_bot = np.matmul(T_bot_inv, T_punto)
        punto_x = T_punto_bot[0][3]
        punto_y = T_punto_bot[1][3]
        # print("goal from bot:", punto_x, punto_y)

        giro = atan2(punto_y, punto_x)
        # print("angulo_a_girar: ", degrees(giro))
        # print("\n")

        if (sqrt((pose.position.x - x_goal)**2 +
                 (pose.position.y - y_goal)**2)) < 0.5:
            if not next >= len(path):
                x_goal = path[next][0]
                y_goal = path[next][1]
                # x_goal=traj.poses.pose.point.x;y_goal=traj.poses.pose.point.y
                next = next + 1
            else:
                if (sqrt((pose.position.x - x_goal)**2 +
                         (pose.position.y - y_goal)**2)) < 0.05:
                    giro = 0
                    pose.position.x = x_goal
                    pose.position.y = y_goal

        twist.angular.z = k_w * giro
        twist.linear.x = k_v * sqrt((pose.position.x - x_goal)**2 +
                                    (pose.position.y - y_goal)**2)

        if twist.linear.x > 0.3:  # cap linear velocity
            twist.linear.x = 0.3
        if twist.angular.z > radians(60):  # cap angular velocity
            twist.angular.z = radians(60)
        if twist.angular.z < radians(-60):  # cap angular velocity
            twist.angular.z = radians(-60)
        twist_pub.publish(twist)
        # print(twist)
        rate.sleep()
    def controller(self):
        rospy.sleep(1)
        print("en el controler")
        k_v = 1
        k_w = 1
        punto = self.traj.poses.pop(0).pose
        x_goal=punto.position.x
        y_goal=punto.position.y
        self.new=False
        while not rospy.is_shutdown() and not self.new:
            print("true goal:", x_goal, y_goal)
            (roll_bot, pitch_bot, yaw_bot) = euler_from_quaternion([
                pose.orientation.x, pose.orientation.y, pose.orientation.z,
                pose.orientation.w
            ])
            trans = np.array([pose.position.x, pose.position.y, 0])
            orient = np.array([roll_bot, pitch_bot, yaw_bot])
            T_bot = compose_matrix(angles=orient, translate=trans)
            T_bot_inv = np.linalg.inv(T_bot)

            trans = np.array([x_goal, y_goal, 0])
            orient = np.array([0, 0, 0])
            T_punto = compose_matrix(angles=orient, translate=trans)

            T_punto_bot = np.matmul(T_bot_inv, T_punto)
            punto_x = T_punto_bot[0][3]
            punto_y = T_punto_bot[1][3]
            print("goal from bot:", punto_x, punto_y)

            giro = atan2(punto_y, punto_x)
            print("angle to turn: ", degrees(giro))
            # print("\n")

            if (sqrt((pose.position.x - x_goal)**2 +
                     (pose.position.y - y_goal)**2)) < 0.5:
                if len(self.traj.poses) > 0:
                    punto = self.traj.poses.pop(0).pose
                    x_goal = punto.position.x
                    y_goal = punto.position.y
                else:
                    if (sqrt((pose.position.x - x_goal)**2 +
                             (pose.position.y - y_goal)**2)) < 0.01:
                        giro = 0
                        pose.position.x = x_goal
                        pose.position.y = y_goal

            twist.angular.z = k_w * giro
            twist.linear.x = k_v * sqrt((pose.position.x - x_goal)**2 +
                                        (pose.position.y - y_goal)**2)

            if twist.linear.x > 0.3:  # cap linear velocity
                twist.linear.x = 0.3
            if twist.angular.z > radians(60):  # cap angular velocity
                twist.angular.z = radians(60)
            if twist.angular.z < radians(-60):  # cap angular velocity
                twist.angular.z = radians(-60)
            twist_pub.publish(twist)
            print(twist)
            rate.sleep()
    def get_kinematic_chain(self, target, source, reference):
        """
        Gets the pose for all joints in the kinematic chain from source to
        target.
        :param target: name of target joint
        :param source: name of source joint
        :param reference: name of reference joint as needed by ros function
                          listener.chain
        :type target: string
        :type source: string
        :type reference: string
        :return: kinematic chain, transforms as translation and quaternions,
                 transforms as matrices
        :rtype: list of strings, list of transforms, list of numpy matrices
        """
        # All frames here.
        listener = self.listener
        chain = listener.chain(target, rospy.Time(), source, rospy.Time(),
                               reference)
        Ts, TMs = [], []

        np.set_printoptions(precision=4, suppress=True)
        TMcum = np.eye(4, 4)
        for i in range(len(chain) - 1):
            t = listener.lookupTransform(chain[i + 1], chain[i], rospy.Time())
            t = [[np.float64(_) for _ in t[0]], [np.float64(_) for _ in t[1]]]
            t1_euler = tfx.euler_from_quaternion(t[1])
            tm = tfx.compose_matrix(translate=t[0], angles=t1_euler)
            Ts.append(t)
            TMs.append(tm)

            t = listener.lookupTransform(chain[i + 1], chain[0], rospy.Time())
            t1_euler = tfx.euler_from_quaternion(t[1])
            tm = tfx.compose_matrix(translate=t[0], angles=t1_euler)
            TMcum = np.dot(TMs[i], TMcum)
            eye = np.dot(tm, np.linalg.inv(TMcum))
            assert (np.allclose(eye - np.eye(4, 4), np.zeros((4, 4)),
                                atol=0.1))

        # Sanity check to make sure we understand what is happening here.
        # for i in range(len(chain)-1):
        #   t = listener.lookupTransform(chain[i+1], chain[0], rospy.Time(0))
        #   tm = tfx.compose_matrix(translate=t[0], angles=t[1])
        #   TMcum = np.dot(TMs[i], TMcum)
        #   eye = np.dot(tm, np.linalg.inv(TMcum))
        #   print(eye-np.eye(4,4))
        # assert(np.allclose(eye-np.eye(4,4), np.zeros((4,4)), atol=1e-2))
        return chain, Ts, TMs
def parseUrdfJoint(xml, jointNames, jointTree):
    '''Compute the actual pose of every joint.'''
    matrix = OrderedDict()
    result = OrderedDict()
    qmatrix = OrderedDict()
    # axises=[]
    # read the parameters of each joint
    for jname in jointNames:
        jointNode = xml.xpath("//joint[@name='%s']" % jname)[0]
        originNode = jointNode.find("origin")
        axisNode = jointNode.find("axis")
        origin_xyz = [0.0, 0.0, 0.0] if originNode is None else map(
            float, originNode.attrib['xyz'].split())
        origin_rpy = [0.0, 0.0, 0.0] if originNode is None else map(
            float, originNode.attrib['rpy'].split())
        # axis_xyz=[0.0,0.0,1.0] if axisNode is None else  map(float,axisNode.attrib['xyz'].split())
        mat = compose_matrix(angles=origin_rpy, translate=origin_xyz)
        pname = getParentJointName(jointTree, jname)  # name of this joint's parent joint
        if pname is None:
            matrix[jname] = mat
        else:
            matrix[jname] = matrix[pname].dot(mat)
        scale, shear, angles, translate, perspective = decompose_matrix(
            matrix[jname])
        result[jname] = [
            np.array(translate).round(8),
            np.array(angles).round(8)
        ]
        qmatrix[jname] = [
            translate.round(8),
            quaternion_from_euler(*angles).round(8)
        ]
        # axises.append(axis_xyz)
    return result
    def __init__(self, robot, remove_object_model_func):
        Task2State.__init__(
            self,
            robot,
            outcomes=['finish', 'continue'],
            input_keys=['object_count', 'orig_channel_xy_yaw'],
            output_keys=['object_count', 'orig_channel_xy_yaw'])

        self.remove_object_model_func = remove_object_model_func
        self.global_place_channel_z = rospy.get_param(
            '~global_place_channel_z')
        self.global_place_channel_yaw = rospy.get_param(
            '~global_place_channel_yaw')
        self.place_z_margin = rospy.get_param('~place_z_margin')
        self.place_z_offset = rospy.get_param('~place_z_offset')
        self.object_num = rospy.get_param('~object_num')
        self.skip_ungrasp = rospy.get_param('~skip_ungrasp', False)
        self.do_channel_recognition = rospy.get_param(
            '~do_channel_recognition')
        self.single_object_mode = rospy.get_param('~single_object_mode')

        grasping_point = rospy.get_param('~grasping_point')
        self.grasping_yaw = rospy.get_param('~grasping_yaw')
        self.grasping_point_coords = tft.compose_matrix(
            translate=[
                grasping_point[0], grasping_point[1], grasping_point[2]
            ],
            angles=[0, 0, self.grasping_yaw])
Example #17
    def trajectory(self, node: str) -> List[Dict[str, List]]:
        # collect all timed nodes corresponding to the node to track
        traj = []
        for nname, ndata in self._graph.nodes(data=True):
            if ndata['__name__'] == node:
                # Filter out Duckiebot nodes that are not connected to
                # another Duckiebot node
                dbot_type = AutolabReferenceFrame.TYPE_DUCKIEBOT_FOOTPRINT
                if ndata["type"] == dbot_type:
                    if not self._graph.has_neighbor_of_type(nname, dbot_type):
                        continue

                T = tr.compose_matrix(translate=ndata["pose"].t,
                                      angles=tr.euler_from_quaternion(
                                          ndata["pose"].q))
                traj.append({
                    'timestamp': ndata["time"],
                    'pose': T.tolist(),
                    # TODO: this is a hack, should be done properly
                    'observers': list({
                        p[0]
                        for e in self._graph.out_edges(nname)
                        for p in self._graph.in_edges(e[1])
                    } if 'footprint' in nname else {})
                })
        # sort trajectory by time
        strategy = lambda tf: tf['timestamp']
        traj = sorted(traj, key=strategy)
        # ---
        return traj
    def load_config(self):

        config_path = rospkg.RosPack().get_path('odom_uwb') + "/config/" + "husky.yaml"  # RosPack.get_path locates the package
        anchor_data = yaml.load(file(config_path, 'r'))
        trans = anchor_data['trans']
        rot = anchor_data['rot']

        return tr.compose_matrix(angles = rot, translate = trans)
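
For reference, an assumed (purely illustrative) layout for config/husky.yaml; only the trans and rot keys are read above, with rot interpreted as Euler angles:

trans: [0.0, 0.0, 0.5]
rot: [0.0, 0.0, 1.5708]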
Example #19
def filterPointCloud(bin_num, pts, source_pc2_kinect_header, height=None, width=None, retPointCloud=True):
    # input height/width if you need filtered uv
    with Timer('b1'):
        global tfListener
        source_pc_kinect = PointCloud()
        source_pc_kinect.header = source_pc2_kinect_header
    with Timer('b2'):
        source_pc_kinect.points = pts
    
    # transform point cloud from kinect frame to shelf frame
    with Timer('b3'):
        import tf.transformations as tfm
        import numpy as np
        (pos, ori) = tfListener.lookupTransform('/shelf', source_pc_kinect.header.frame_id, source_pc_kinect.header.stamp)
        
        points = []
        T = np.dot(tfm.compose_matrix(translate=pos) , tfm.quaternion_matrix(ori) )

        for pt in pts:
            #tmp = np.dot(T, np.array(pt+[1])).tolist()
            points.append( np.dot(T, np.array(pt+[1])).tolist() )
            
    pts = []
    cnt = 0
    
    with Timer('b4'):
        if height and width:
            filtered_uvs = []
            filtered_uvs_mask = [False for i in xrange(height*width)]
            for point in points:  # change source to source_pc_kinect
                if not math.isnan(point[0]) and inside_bin(point, bin_num):
                    pts.append(point)   # can be removed because we only need mask
                    filtered_uvs_mask[cnt] = True
                    filtered_uvs.append([cnt//width, cnt%width])
                cnt += 1
        else:
            for point in points:  # change source to source_pc_kinect
                if inside_bin(point, bin_num):
                    pts.append(point)
    
    if retPointCloud:
        with Timer('b5'):
            filtered_pc_shelf = PointCloud()
            filtered_pc_shelf.header = source_pc2_kinect_header
            filtered_pc_shelf.header.frame_id = '/shelf'
            filtered_pc_shelf.points = [Point32(pt[0], pt[1], pt[2]) for pt in pts]
            
            # render filtered point cloud
            filtered_pc_map = tfListener.transformPointCloud('/map', filtered_pc_shelf)
            createPointsMarker(filtered_pc_map.points, marker_id=2, frame_id='/map', rgba=(0,1,0,1)) #points are in Point32
        
    
    with Timer('b6'):
        if height and width:
            if retPointCloud:
                return (filtered_pc_map, filtered_uvs, filtered_uvs_mask)
            else:
                return filtered_uvs_mask
        else:
            return filtered_pc_map  # note: only defined when retPointCloud is True (the default)
Example #20
def clickedCB(data):
    global askedMarkers
    global listener
    global worldTransform
    global measuredMarkers
    global clickNum
    print 'clicked'
    try:
        (tipTrans, tipRot) = listener.lookupTransform('/mocha_world', '/tip',
                                                      rospy.Time(0))
        measuredMarkers[clickNum, :] = tipTrans

        clickNum = clickNum + 1
        if clickNum == 8:
            clickNum = 0
            print 'finished clicking'
            print 'AskedMarkers:'
            print askedMarkers
            print 'measured markers:'
            print measuredMarkers
            (worldRotupdate,
             worldTransUpdate) = rigid_transform_3D(measuredMarkers,
                                                    askedMarkers)
            worldRot4 = np.identity(4)
            for i in range(3):
                worldRot4[(0, 1, 2), i] = worldRotupdate[:, i]
            worldTransformUpdate = TR.compose_matrix(
                translate=worldTransUpdate,
                angles=TR.euler_from_matrix(worldRot4))
            worldTransformationUpdateInv = TR.inverse_matrix(
                worldTransformUpdate)
            worldTransform = np.dot(worldTransformationUpdateInv,
                                    worldTransform)
    except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
        pass
    def __init__(self, robot, add_object_model_func):
        Task2State.__init__(self,
                            robot,
                            outcomes=['succeeded', 'failed'],
                            input_keys=['object_count'],
                            output_keys=['object_count'])

        self.add_object_model_func = add_object_model_func
        self.do_object_recognition = rospy.get_param('~do_object_recognition')
        self.joint_torque_thresh = rospy.get_param('~joint_torque_thresh')
        self.skip_grasp = rospy.get_param('~skip_grasp', False)
        self.object_grasping_height = rospy.get_param(
            '~object_grasping_height')
        self.object_interval = rospy.get_param('~object_interval')
        self.lane_interval = rospy.get_param('~lane_interval')
        self.global_first_object_pos = rospy.get_param(
            '~global_first_object_pos')
        self.global_object_yaw = rospy.get_param('~global_object_yaw')
        self.grasp_land_mode = rospy.get_param('~grasp_land_mode')
        self.reset_realsense_odom = rospy.get_param('~reset_realsense_odom')
        self.stop_if_grasp_failed = rospy.get_param('~stop_if_grasp_failed')
        self.reset_realsense_client = rospy.ServiceProxy(
            '/realsense1/odom/reset', std_srvs.srv.Empty)
        grasping_point = rospy.get_param('~grasping_point')
        grasping_yaw = rospy.get_param('~grasping_yaw')
        self.grasping_point_coords = tft.compose_matrix(
            translate=[
                grasping_point[0], grasping_point[1], grasping_point[2]
            ],
            angles=[0, 0, grasping_yaw])
        self.grasp_force_landing_mode = rospy.get_param(
            '~grasp_force_landing_mode')
        self.force_landing_height = rospy.get_param('~force_landing_height')
 def ref_pose_cb(self, some_pose): # Takes target pose, returns ref se3
     rospy.logdebug("ref_pose_cb called in velocity_control.py")
     p = np.array([some_pose.position.x, some_pose.position.y, some_pose.position.z])
     quat = np.array([some_pose.orientation.x, some_pose.orientation.y, some_pose.orientation.z, some_pose.orientation.w])
     goal_tmp = tr.compose_matrix(angles=tr.euler_from_quaternion(quat, 'sxyz'), translate=p)
     with self.mutex:
         self.T_goal = goal_tmp
    def __init__(self, robot):
        Task2State.__init__(
            self,
            robot,
            outcomes=['succeeded', 'failed', 'adjust_again'],
            input_keys=['orig_global_trans', 'search_count', 'search_failed'],
            output_keys=['orig_global_trans', 'search_count', 'search_failed'])

        self.skip_adjust_grasp_position = rospy.get_param(
            '~skip_adjust_grasp_position', False)
        self.do_object_recognition = rospy.get_param('~do_object_recognition')
        self.object_yaw_thresh = rospy.get_param('~object_yaw_thresh')
        self.global_object_yaw = rospy.get_param('~global_object_yaw')
        self.grasping_yaw = rospy.get_param('~grasping_yaw')
        self.recognition_wait = rospy.get_param('~recognition_wait')
        self.adjust_grasp_image_type = rospy.get_param(
            '~adjust_grasp_image_type', 'depth')
        self.object_pos_thresh = rospy.get_param('~object_pos_thresh')

        grasping_point = rospy.get_param('~grasping_point')
        grasping_yaw = rospy.get_param('~grasping_yaw')
        self.grasping_point_coords = tft.compose_matrix(
            translate=[
                grasping_point[0], grasping_point[1], grasping_point[2]
            ],
            angles=[0, 0, grasping_yaw])

        self.object_pose_sub = rospy.Subscriber(
            'rectangle_detection_' + self.adjust_grasp_image_type +
            '/target_object_' + self.adjust_grasp_image_type, PoseArray,
            self.objectPoseCallback)
        self.object_pose = PoseArray()
def read_pos(t, link_states, recorder_pos, joint_states, readLinkState):
#pub_foot_z, pub_body_z):

    ## Initialization
    from geometry_msgs.msg import Wrench, Vector3
    from std_msgs.msg import Float64
    import rospy
    import math
    from numpy.linalg import norm
    from gazebo_msgs.srv import GetLinkState
    from tf.transformations import euler_from_quaternion, quaternion_from_euler, compose_matrix, quaternion_matrix
    import numpy as np  # np is used below for the matrix products

    if readLinkState.value is None:
        readLinkState.value = rospy.ServiceProxy("/gazebo/get_link_state", GetLinkState)

    ## set time from which recording of data (for CSV) should start
    record_time = 0.0

    # get current angle
    phi1 = joint_states.value.position[1]
    phi3 = -joint_states.value.position[3]

    ## definitions according to matlab script/ RAL paper, defined solely for comparison
    offset_q1 = -40*math.pi/180
    offset_q2 = 40*math.pi/180
    q1 = phi1+offset_q1
    q3 = phi3+offset_q2

    body_pos = readLinkState.value('base_link', 'link')
    body_z = body_pos.link_state.pose.position.z
    body_x = body_pos.link_state.pose.position.y

    lower_leg_pos = readLinkState.value('link2_link', 'link')
    (knee_x, knee_y, knee_z) = [lower_leg_pos.link_state.pose.position.x, lower_leg_pos.link_state.pose.position.y, lower_leg_pos.link_state.pose.position.z]
    orientation_q = lower_leg_pos.link_state.pose.orientation
    orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]
    #matrix_k = quaternion_matrix(orientation_list)
    #matrix_k[:3,3] = [knee_x, knee_y, knee_z]
    (roll, pitch, yaw) = euler_from_quaternion(orientation_list)
    matrix_knee = compose_matrix(scale=None, shear=None, angles=[roll, pitch, yaw], translate=[knee_x, knee_y, knee_z], perspective=None)
    matrix_kf = compose_matrix(scale=None, shear=None, angles=[0, 0, 0], translate=[0, 0.08, 0], perspective=None)
    matrix_foot = np.dot(matrix_knee, matrix_kf)

    foot_z = matrix_foot[2][3]  # -0.0239

    if t > record_time:
        recorder_pos.record_entry(t, body_x, body_z, foot_z, q1, q3, phi1, phi3)
Example #25
def se3_to_T(rvec, tvec):
    """Convert an se(3) OpenCV pose to a 4x4 transformation matrix.

    OpenCV poses are an Euler-Rodrigues rotation vector and a translation vector.
    """
    R, _ = cv2.Rodrigues(rvec)
    euler_angles = transformations.euler_from_matrix(R)
    return transformations.compose_matrix(angles=euler_angles, translate=np.squeeze(tvec))
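
A minimal usage sketch for se3_to_T (assumes cv2 and the transformations module are imported as above; values are illustrative):

import numpy as np
import cv2

rvec = np.array([0.0, 0.0, np.pi / 2])  # Rodrigues vector: 90 degrees about z
tvec = np.array([[0.1], [0.2], [0.3]])  # translation, as returned by cv2.solvePnP
T = se3_to_T(rvec, tvec)                # 4x4 homogeneous transform
print(T[:3, 3])                         # -> [0.1 0.2 0.3]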
 def ref_poseCB(self, goal_pose): # Takes target pose, returns ref se3
     rospy.logdebug("ref_pose_cb called in velocity_control.py")
     # p = np.array([some_pose.position.x, some_pose.position.y, some_pose.position.z])
     p = np.array([goal_pose.position.x, goal_pose.position.y + 0.04, goal_pose.position.z])
     quat = np.array([goal_pose.orientation.x, goal_pose.orientation.y, goal_pose.orientation.z, goal_pose.orientation.w])
     goal_tmp = tr.compose_matrix(angles=tr.euler_from_quaternion(quat, 'sxyz'), translate=p) # frame is spatial 'sxyz', return Euler angle from quaternion for specified axis sequence.
     with self.mutex:
         self.original_goal = goal_tmp
Example #27
def tf_to_T(tf):
    """Convert a (tx, ty, tz, rx, ry, rz) tuple to a 4x4 transformation matrix."""
    if len(tf) == 6:
        # Single transform
        return transformations.compose_matrix(angles=tf[3:], translate=tf[:3])
    else:
        # Array of transforms
        assert tf.shape[1] == 6, "Transform array must be Nx6"
        return np.vstack([tf_to_T(row) for row in tf])
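
For example (a sketch; note the Nx6 branch vertically stacks the 4x4 results into a (4N)x4 array, as written above):

import numpy as np

T = tf_to_T((0.1, 0.2, 0.3, 0.0, 0.0, np.pi))  # single (tx, ty, tz, rx, ry, rz)
print(T.shape)  # (4, 4)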
Example #28
def evaluate(t_rc, camera_rb_poses, all_image_coords, all_calibration_markers,
             t_co, camera_mat, distortion_coeffs):
    # Get the optical frame pose in the mocap frame
    t_rc = transformations.compose_matrix(translate=t_rc[:3],
                                          angles=(t_rc[3:]))
    t_mos = calculate_t_mos(camera_rb_poses, t_rc, t_co)

    shape = (8, 5)

    square_size = 0.035
    marker_z = 0.012
    marker_positions = [
        (-square_size, 0, -marker_z),
        (square_size * 8, 0, -marker_z),
        (square_size * 8, square_size * 5, -marker_z),
        (-square_size, square_size * 5, -marker_z),
    ]

    total_error = 0
    for t_mo, image_coords, calibration_markers in zip(
            t_mos, all_image_coords, all_calibration_markers):
        checkerboard_coords = np.zeros((shape[0] * shape[1], 3), np.float32)
        checkerboard_coords[:, :2] = (
            np.mgrid[0:shape[0], 0:shape[1]].T.reshape(-1, 2)) * square_size

        retval, rvec, tvec = cv2.solvePnP(checkerboard_coords, image_coords,
                                          camera_mat, distortion_coeffs)
        t_o_ch = calibration_common.se3_to_T(rvec, tvec)
        t_m_ch = np.matmul(t_mo, t_o_ch)
        total_distance = 0
        for marker in marker_positions:
            marker = np.matmul(t_m_ch, np.append(marker, 1))[:3]
            match, distance = find_closest_marker_distance(
                marker, calibration_markers)
            print(match[2] - marker[2])
            total_distance += distance
        total_distance /= 4
        print("*" * 80)
        # To handle when the checkerboard was flipped during calibration
        checkerboard_coords = checkerboard_coords[::-1]
        retval, rvec, tvec = cv2.solvePnP(checkerboard_coords, image_coords,
                                          camera_mat, distortion_coeffs)
        t_o_ch = calibration_common.se3_to_T(rvec, tvec)
        t_m_ch = np.matmul(t_mo, t_o_ch)
        other_total_distance = 0
        for marker in marker_positions:
            marker = np.matmul(t_m_ch, np.append(marker, 1))[:3]
            match, distance = find_closest_marker_distance(
                marker, calibration_markers)
            other_total_distance += distance
            print(match[2] - marker[2])
        other_total_distance /= 4

        if other_total_distance < total_distance:
            total_distance = other_total_distance
        total_error += total_distance
    return total_error / len(t_mos)
Example #29
def _inverse_tuples(t):
    trans, rot = t
    euler = tr.euler_from_quaternion(rot)
    m = tr.compose_matrix(translate=trans, angles=euler)
    m_inv = tr.inverse_matrix(m)
    trans_inv = tr.translation_from_matrix(m_inv)
    rot_inv = tr.rotation_matrix(*tr.rotation_from_matrix(m_inv))
    quat_inv = tr.quaternion_from_matrix(rot_inv)
    return (trans_inv, quat_inv)
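
A round-trip check (sketch; assumes tr is tf.transformations and numpy is available):

import numpy as np

t = ([1.0, 2.0, 3.0], tr.quaternion_from_euler(0, 0, 0.5))
trans_inv, quat_inv = _inverse_tuples(t)
m = tr.compose_matrix(translate=t[0], angles=tr.euler_from_quaternion(t[1]))
m_inv = tr.compose_matrix(translate=trans_inv, angles=tr.euler_from_quaternion(quat_inv))
print(np.allclose(np.dot(m, m_inv), np.eye(4)))  # True: composing with the inverse gives identity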
Example #30
def matrix_from_xyzquat_np_array(arg1, arg2=None):
    if arg2 is not None:
        translate = arg1
        quaternion = arg2
    else:
        translate = arg1[0:3]
        quaternion = arg1[3:7]

    return np.dot(compose_matrix(translate=translate),quaternion_matrix(quaternion))
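
Usage sketch (the np.dot order above composes translation first, then rotation; assumes compose_matrix and quaternion_matrix are imported as in the snippet):

T = matrix_from_xyzquat_np_array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0, 1.0])  # packed [xyz + quat]
# equivalently: matrix_from_xyzquat_np_array([1.0, 2.0, 3.0], [0.0, 0.0, 0.0, 1.0])
print(T[:3, 3])  # -> [1. 2. 3.]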
Example #31
 def kinect_to_base(self,kinect_pose):
     try:
         time.sleep(1)
         (self.transl,self.quat)=self.listener.lookupTransform('base','kinect',rospy.Time(0))
         self.rot = euler_from_quaternion(self.quat)
         self.tf_SE3 = compose_matrix(angles=self.rot,translate = self.transl)
         print self.tf_SE3
         base_pos=numpy.dot(self.tf_SE3,kinect_pose)
         print base_pos
         # print self.cmd_pos
     except (tf.Exception):
         rospy.logerr("Could not transform from "
                      "{0:s} to {1:s}".format('base', 'kinect'))
         base_pos = None
     return base_pos
def animate_2dsynced(data, shape_id, figfname):
    fig, ax = plt.subplots()
    fig.set_size_inches((7,7))
    probe_radius = 0.004745   # probe1: 0.00626/2 probe2: 0.004745
    
    sub = 1                 # subsample rate
    tip_pose = data['tip_poses_2d']
    object_pose = data['object_poses_2d']
    force = data['force_2d']
    
    patches = []
    
    
    # add the object as polygon
    shape_db = ShapeDB()
    shape_polygon = shape_db.shape_db[shape_id]['shape'] # shape of the objects presented as polygon.
    shape_polygon_3d = np.hstack((np.array(shape_polygon), np.zeros((len(shape_polygon), 1)), np.ones((len(shape_polygon), 1))))
    

    print 'object_pose', len(object_pose), 'tip_pose', len(tip_pose), 'force', len(force)
    plt.ion()
    for i in (range(0, len(tip_pose), sub)):
        
        plt.cla()
        T = tfm.compose_matrix(translate = object_pose[i][0:2] + [0], angles = (0,0,object_pose[i][2]) )
        shape_polygon_3d_world = np.dot(T, shape_polygon_3d.T)
        
        obj = mpatches.Polygon(shape_polygon_3d_world.T[:,0:2], closed=True, color='blue', alpha=0.05)
        ax.add_patch(obj)
    
        # add the probes as circle
        circle = mpatches.Circle(tip_pose[i][0:2], probe_radius, ec="none", color='red', alpha=0.5)
        ax.add_patch(circle)
        
        # add the force
        ax.arrow(tip_pose[i][0], tip_pose[i][1], force[i][0]/100, force[i][1]/100, head_width=0.005, head_length=0.01, fc='k', ec='k')
        
        #arrow = mpatches.Arrow(tip_pose[i][0], tip_pose[i][1], force[i][0],
        #                force[i][1], head_width=0.05, head_length=0.1, fc='k', ec='k')
        #ax.add_patch(arrow)
        
        # render it
        plt.axis([-0.3, 0.3, -0.3, 0.3])
        #plt.axis('equal')
        plt.draw()
        #time.sleep(0.1)
    plt.show()
Example #33
def poseTomatrix(pose):
    """Converts a Pose message to a 4x4 transformation matrix."""
    assert isinstance(pose, Pose)

    T = (pose.position.x, pose.position.y, pose.position.z)
    R = transformations.euler_from_quaternion((pose.orientation.x,
                                               pose.orientation.y,
                                               pose.orientation.z,
                                               pose.orientation.w))

    return transformations.compose_matrix(translate=T, angles=R)
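
Usage sketch (assumes geometry_msgs is available; the orientation goes through Euler angles, as in the function above):

from geometry_msgs.msg import Pose

p = Pose()
p.position.x, p.position.y, p.position.z = 1.0, 2.0, 3.0
p.orientation.w = 1.0  # identity rotation
print(poseTomatrix(p)[:3, 3])  # -> [1. 2. 3.]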
 def goal_from_tag_pose(self, tag_pose):
     tf_mat = tft.compose_matrix(angles=self.grasp_transform[3:])
     tag_q = (tag_pose.pose.orientation.x, tag_pose.pose.orientation.y,
                 tag_pose.pose.orientation.z, tag_pose.pose.orientation.w)
     tag_mat = tft.quaternion_matrix(tag_q)
     new_mat = np.dot(tag_mat, tf_mat)
     dxyz = self.grasp_transform[:3]
     dxyz.append(1)
     d_xyz_base = np.matrix(dxyz).T
     d_xyz_tag = np.dot(tag_mat, d_xyz_base).A1[:3].tolist()
     new_q = tft.quaternion_from_matrix(new_mat)
     grasp_pose = PoseStamped()
     grasp_pose.header.stamp = rospy.Time.now()
     grasp_pose.header.frame_id = tag_pose.header.frame_id
     grasp_pose.pose.position.x = tag_pose.pose.position.x + d_xyz_tag[0]
     grasp_pose.pose.position.y = tag_pose.pose.position.y + d_xyz_tag[1]
     grasp_pose.pose.position.z = tag_pose.pose.position.z + d_xyz_tag[2]
     grasp_pose.pose.orientation = Quaternion(*new_q)
     self.test_pub_2.publish(grasp_pose)
     return grasp_pose
	def tf_base_to_stylus (self):

		try:
			(self.transl,self.quat) = self.tfListener.lookupTransform('base','stylus',rospy.Time(0))
			# lookupTransform returns the transform between two coordinate
			# frames as a translation and a quaternion.
			self.rot= euler_from_quaternion(self.quat) #Get euler angles from the quaternion

			self.tf_SE3 = compose_matrix(angles=self.rot,translate=self.transl)

			# Store the transformation in a format compatible with geometry_msgs/Twist
			self.transf = Twist(Vector3(self.transl[0],self.transl[1],self.transl[2]),(Vector3(self.rot[0],self.rot[1],self.rot[2])))
			#Publish the transformation
			self.Tsb.publish(self.transf)

		except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
			self.tf_SE3 = None
			pass
			# If an exception occurs, skip the transform lookup this cycle.
			# It is encouraged to publish a logging message to rosout, e.g.:
			# rospy.logerr("Could not transform from %s to %s", "base", "stylus")

		return
 def params_to_matrix(self, params):
     # uses Euler angles; assumes we are not near singularities
     translate = params[0:3]
     angles = params[3:6]
     return transformations.compose_matrix(translate=translate, angles=angles)
Example #37
def matrix_from_xyzrpy(translate, rpy):
    return np.dot(tfm.compose_matrix(translate=translate) , 
                   tfm.euler_matrix(*rpy)).tolist()
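
For example (a sketch; the helper returns a plain nested list via tolist()):

T = matrix_from_xyzrpy([0.5, 0.0, 0.2], [0.0, 0.0, 1.57])
print(T[0][3], T[1][3], T[2][3])  # -> 0.5 0.0 0.2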
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException, tf.Exception) as e:
            rospy.logwarn("Could not find transformation: " + str(e))
            continue

        # rospy.loginfo("Got the matrix:\n%s", transformation_matrix)

        translation = transformation_matrix[:3, 3]
        rotation = transformation_matrix[:3, :3]
        permutation_matrix = numpy.array([[0, 0., -1],
                                          [-1, 0,  0],
                                          [0, 1,  0]])
        new_rotation = permutation_matrix.dot(rotation)

        R = numpy.eye(4)
        R[:3, :3] = new_rotation
        T1 = transformations.compose_matrix(translate=-translation)
        T2 = transformations.compose_matrix(translate=translation)

        # rotate about the frame's own origin: translate to the origin,
        # apply the rotation, then translate back
        # full_transformation_matrix = T1.dot(R).dot(T2)
        full_transformation_matrix = T2.dot(R).dot(T1)

        # rospy.loginfo("Full transformation matrix:\n%s", full_transformation_matrix)
        resulting_matrix = full_transformation_matrix.dot(transformation_matrix)
        # rospy.loginfo("The result is:\n%s", resulting_matrix)

        full_translation = transformations.translation_from_matrix(resulting_matrix)
        full_rotation_q = transformations.quaternion_from_matrix(resulting_matrix)

        br.sendTransform(full_translation, full_rotation_q, hdr.stamp, "flipped_frame", parent_frame)
Example #39
 def plan(self):
     # plan() returns an object with attributes:
     #   q_traj,
     #   snopt_info_iktraj,
     #   infeasible_constraint_iktraj,
     #   snopt_info_ik
     
     # 1. Get a handle for the service
     #    todo: makes warning if ikTrajServer does not exist
     ikTrajServer_pub = rospy.Publisher('/ikTrajServer', std_msgs.msg.String, queue_size=100)
     
     argin = {}
     # 2. prepare input arguments        
     if self.q0 is not None: 
         argin['q0'] = self.q0
     else:
         # get robot joints from published topic  # sensor_msgs/JointState
         while True:
             APCrobotjoints = ROS_Wait_For_Msg(self.joint_topic, sensor_msgs.msg.JointState).getmsg() 
             q0 = APCrobotjoints.position
             if len(q0) < 6:
                 continue
             else:
                 argin['q0'] = q0
                 break
         
     # 2.1 transform tip pose to hand pose    
     if self.target_tip_pos is not None: 
         tip_hand_tfm_mat = np.dot( tfm.compose_matrix(translate=self.tip_hand_transform[0:3]),
                                    tfm.compose_matrix(angles=self.tip_hand_transform[3:6]) )
         tip_world_rot_mat = tfm.quaternion_matrix(self.target_tip_ori)
         tip_world_tfm_mat = np.dot(tfm.compose_matrix(translate=self.target_tip_pos) , tip_world_rot_mat)
         hand_tip_tfm_mat = np.linalg.inv(tip_hand_tfm_mat)
         hand_world_tfm_mat = np.dot(tip_world_tfm_mat, hand_tip_tfm_mat)
         target_hand_pos = tfm.translation_from_matrix(hand_world_tfm_mat)
         
         argin['target_hand_pos'] =  target_hand_pos.tolist()
         
     if self.target_tip_ori is not None:
         tip_hand_tfm_mat = tfm.compose_matrix(angles=self.tip_hand_transform[3:6])
         tip_world_rot_mat = tfm.quaternion_matrix(self.target_tip_ori)
         hand_tip_tfm_mat = np.linalg.inv(tip_hand_tfm_mat)
         hand_world_tfm_mat = np.dot(tip_world_rot_mat, hand_tip_tfm_mat)
         target_hand_ori = tfm.quaternion_from_matrix(hand_world_tfm_mat)
         
         argin['target_hand_ori'] = roshelper.ros2matlabQuat(target_hand_ori.tolist())  # tolist: so that its serializable
     
     # 2.2 prepare other options
     argin['straightness'] = self.straightness
     argin['target_link'] = self.target_link
     argin['pos_tol'] = self.pos_tol
     argin['ori_tol'] = self.ori_tol
     
     if self.inframebb is not None:
         argin['inframebb'] = self.inframebb
     
     if self.target_joint_bb is not None:
         argin['target_joint_bb'] = self.target_joint_bb
     
     argin['N'] = self.N
     
     argin['tip_hand_transform'] = self.tip_hand_transform
     
     if self.ik_only:
         argin['ik_only'] = 1
     else:
         argin['ik_only'] = 0
     
     argin_json = json.dumps(argin)  # convert to JSON format
     
     
     for i in range(10):
         # 3. call it
         #print 'calling matlab drake...'
         argin_json_ = argin_json + '\n'
         
         try:
             tn = telnetlib.Telnet(self.ikServerAddress[0], self.ikServerAddress[1])
             
             time.sleep(0.05)
             tn.write(argin_json_)
         except socket.error as inst:
             print '[Drake] Matlab server not up now.'
             if i==9:
                 return None
             continue
     
         time.sleep(0.05)
         
         # 4. parse output arguments
         try:
             ret_json = tn.read_all()
             ret = json.loads(ret_json)  # convert from json to struct
             break
         except:
             print '[Drake] read_all() failed, try again.'
             if i==9:
                 return None
             continue
     
     return Plan(ret)
def matrix_from_xyzquat(translate, quaternion):
    return np.dot(tfm.compose_matrix(translate=translate) , 
                   tfm.quaternion_matrix(quaternion)).tolist()
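
A quick equivalence check (sketch; tfm is tf.transformations): the translate-then-rotate product above matches compose_matrix called with the Euler angles of the same quaternion.

import numpy as np

q = tfm.quaternion_from_euler(0.1, 0.2, 0.3)
T1 = np.array(matrix_from_xyzquat([1, 2, 3], q))
T2 = tfm.compose_matrix(translate=[1, 2, 3], angles=tfm.euler_from_quaternion(q))
print(np.allclose(T1, T2))  # True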